/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>

#include <syscall.h>

int init_display_kms(struct drm_device *dev);

extern int intel_agp_enabled;

static struct drm_driver driver;

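/*
 * Display MMIO layout helpers: these macros fill the per-pipe,
 * per-transcoder and per-palette offset tables in intel_device_info, so
 * display code can index register blocks by pipe. CHV has its own pipe C
 * layout, hence the separate GEN_CHV_PIPEOFFSETS variant below.
 */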
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }


static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

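/*
 * Feature flags common to all gen7 parts; the Ivybridge, Valleyview and
 * Haswell info structs below start from this set and override individual
 * fields (designated initializers make the last initializer win).
 */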
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1, \
	GEN_DEFAULT_PIPEOFFSETS, \
	IVB_CURSOR_OFFSETS

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};

#define VLV_FEATURES  \
	.gen = 7, .num_pipes = 2, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.display_mmio_offset = VLV_DISPLAY_BASE, \
	GEN_DEFAULT_PIPEOFFSETS, \
	CURSOR_OFFSETS

static const struct intel_device_info intel_valleyview_m_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
};

#define HSW_FEATURES  \
	GEN7_FEATURES, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
	.has_ddi = 1, \
	.has_fpga_dbg = 1

static const struct intel_device_info intel_haswell_d_info = {
	HSW_FEATURES,
	.is_haswell = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	HSW_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_d_info = {
	HSW_FEATURES,
	.gen = 8,
};

static const struct intel_device_info intel_broadwell_m_info = {
	HSW_FEATURES,
	.gen = 8, .is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	HSW_FEATURES,
	.gen = 8,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	HSW_FEATURES,
	.gen = 8, .is_mobile = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_cherryview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
	HSW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_skylake_gt3_info = {
	HSW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broxton_info = {
	.is_preliminary = 1,
	.is_broxton = 1,
	.gen = 9,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_kabylake_info = {
	HSW_FEATURES,
	.is_preliminary = 1,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_kabylake_gt3_info = {
	HSW_FEATURES,
	.is_preliminary = 1,
	.is_kabylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_I915G_IDS(&intel_i915g_info),
	INTEL_I915GM_IDS(&intel_i915gm_info),
	INTEL_I945G_IDS(&intel_i945g_info),
	INTEL_I945GM_IDS(&intel_i945gm_info),
	INTEL_I965G_IDS(&intel_i965g_info),
	INTEL_G33_IDS(&intel_g33_info),
	INTEL_I965GM_IDS(&intel_i965gm_info),
	INTEL_GM45_IDS(&intel_gm45_info),
	INTEL_G45_IDS(&intel_g45_info),
	INTEL_PINEVIEW_IDS(&intel_pineview_info),
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
	INTEL_HSW_D_IDS(&intel_haswell_d_info),
	INTEL_HSW_M_IDS(&intel_haswell_m_info),
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
	INTEL_CHV_IDS(&intel_cherryview_info),
	INTEL_SKL_GT1_IDS(&intel_skylake_info),
	INTEL_SKL_GT2_IDS(&intel_skylake_info),
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
	INTEL_BXT_IDS(&intel_broxton_info),
	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
	{0, 0, 0}
};

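/*
 * When the real ISA bridge cannot be passed through to a VM, guess which
 * PCH would normally accompany this GPU from the GPU generation alone.
 */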
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough easy for VMMs, which only need
	 * to expose the ISA bridge to let the driver know the real hardware
	 * underneath. This is a requirement from the virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor == 0x1af4 &&
				    pch->subsystem_device == 0x1100)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

//	pci_dev_put(pch);
}

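/*
 * Decide whether inter-ring semaphores may be used: an explicit
 * i915.semaphores modparam wins, otherwise fall back to the per-platform
 * defaults below.
 */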
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

#if 0

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);

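/* True when the target sleep state is suspend-to-idle (S0ix) rather than S3. */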
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	if (HAS_CSR(dev_priv))
		flush_work(&dev_priv->csr.work);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}

static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = intel_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	disable_rpm_wakeref_asserts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);

	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

out:
	dev_priv->suspended_to_idle = false;

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}
#endif

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	intel_reset_gt_powersave(dev);

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

//	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	dev_priv->gpu_error.reload_in_reset = true;

	ret = i915_gem_init_hw(dev);

	dev_priv->gpu_error.reload_in_reset = false;

	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		return ret;
	}

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);

	return 0;
}

#if 0
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}

static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when DC5 support is added disable DC5 here. */

	broxton_ddi_phy_uninit(dev);
	broxton_uninit_cdclk(dev);
	bxt_enable_dc9(dev_priv);

	return 0;
}

static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when CSR FW support is added make sure the FW is loaded */

	bxt_disable_dc9(dev_priv);

	/*
	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
	 * is available.
	 */
	broxton_init_cdclk(dev);
	broxton_ddi_phy_init(dev);

	return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully set up by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->pcbr			= I915_READ(VLV_PCBR);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit, they are not
	 * to be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_PCBR,			s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}
#endif

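/*
 * Force the Gunit graphics clock on or off; when forcing it on, wait up to
 * 20 ms for the clock status bit to acknowledge the request.
 */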
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}
#if 0
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      onoff(wait_for_on),
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  onoff(wait_for_on));

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

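/*
 * S0ix entry sequence for VLV/CHV: force the GFX clock on, disallow GT
 * wakes, save the Gunit state (skipped on Cherryview) and release the
 * forced clock again.
 */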
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

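/*
 * Runtime PM entry point: idle the GPU, quiesce interrupts, run the
 * platform-specific suspend sequence and notify the opregion.
 */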
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	intel_uncore_forcewake_reset(dev, false);

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements common functionality of the runtime and system
 * suspend sequences.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
#endif

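/*
 * DRM driver descriptor for this port; callbacks that the port does not
 * implement are left commented out below.
 */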
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET,
	.load = i915_driver_load,
//	.unload = i915_driver_unload,
	.open = i915_driver_open,
//	.lastclose = i915_driver_lastclose,
//	.preclose = i915_driver_preclose,
//	.postclose = i915_driver_postclose,
//	.set_busid = drm_pci_set_busid,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,

//	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
//	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
//	.gem_prime_export = i915_gem_prime_export,
//	.gem_prime_import = i915_gem_prime_import,

//	.dumb_create = i915_gem_dumb_create,
//	.dumb_map_offset = i915_gem_mmap_gtt,
//	.dumb_destroy = i915_gem_dumb_destroy,
//	.ioctls = i915_ioctls,
//	.fops = &i915_driver_fops,
//	.name = DRIVER_NAME,
//	.desc = DRIVER_DESC,
//	.date = DRIVER_DATE,
//	.major = DRIVER_MAJOR,
//	.minor = DRIVER_MINOR,
//	.patchlevel = DRIVER_PATCHLEVEL,
};

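/*
 * Driver entry point for this port: find a supported GPU on the PCI bus
 * using the ID table above, then register the DRM driver for it.
 */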
int i915_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id *ent;
    int err;

    ent = find_pci_device(&device, pciidlist);
    if (unlikely(ent == NULL)) {
        dbgprintf("device not found\n");
        return -ENODEV;
    }

    drm_core_init();

    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
                               device.pci_dev.device);

    driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;

    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);

    return err;
}


MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");