Subversion Repositories Kolibri OS


Rev Author Line No. Line
2325 Serge 1
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2
 */
3
/*
4
 *
5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6
 * All Rights Reserved.
7
 *
8
 * Permission is hereby granted, free of charge, to any person obtaining a
9
 * copy of this software and associated documentation files (the
10
 * "Software"), to deal in the Software without restriction, including
11
 * without limitation the rights to use, copy, modify, merge, publish,
12
 * distribute, sub license, and/or sell copies of the Software, and to
13
 * permit persons to whom the Software is furnished to do so, subject to
14
 * the following conditions:
15
 *
16
 * The above copyright notice and this permission notice (including the
17
 * next paragraph) shall be included in all copies or substantial portions
18
 * of the Software.
19
 *
20
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
 *
28
 */
29
 
6084 serge 30
#include 
3031 serge 31
#include 
32
#include 
2330 Serge 33
#include "i915_drv.h"
4126 Serge 34
#include "i915_trace.h"
2330 Serge 35
#include "intel_drv.h"
2325 Serge 36
 
37
#include 
6084 serge 38
#include 
2325 Serge 39
#include 
40
#include 
6320 serge 41
#include 
2325 Serge 42
 
3031 serge 43
#include 
44
 
2325 Serge 45
#include 
46
 
5060 serge 47
static struct drm_driver driver;
2330 Serge 48
 
5060 serge 49
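/* Default pipe, transcoder and palette register offsets shared by the device descriptors below. */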
#define GEN_DEFAULT_PIPEOFFSETS \
50
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
51
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
52
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
53
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
54
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
3031 serge 55
 
5060 serge 56
#define GEN_CHV_PIPEOFFSETS \
57
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
58
			  CHV_PIPE_C_OFFSET }, \
59
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
60
			   CHV_TRANSCODER_C_OFFSET, }, \
61
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
62
			     CHV_PALETTE_C_OFFSET }
3031 serge 63
 
5060 serge 64
#define CURSOR_OFFSETS \
65
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
2330 Serge 66
 
5060 serge 67
#define IVB_CURSOR_OFFSETS \
68
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
2330 Serge 69
 
5060 serge 70
int init_display_kms(struct drm_device *dev);
2330 Serge 71
 
3031 serge 72
 
4104 Serge 73
extern int intel_agp_enabled;
74
 
2326 Serge 75
#define PCI_VENDOR_ID_INTEL        0x8086
76
 
2325 Serge 77
 
2339 Serge 78
static const struct intel_device_info intel_i915g_info = {
3746 Serge 79
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
2339 Serge 80
	.has_overlay = 1, .overlay_needs_physical = 1,
4560 Serge 81
	.ring_mask = RENDER_RING,
5060 serge 82
	GEN_DEFAULT_PIPEOFFSETS,
83
	CURSOR_OFFSETS,
2339 Serge 84
};
85
static const struct intel_device_info intel_i915gm_info = {
3746 Serge 86
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
2339 Serge 87
	.cursor_needs_physical = 1,
88
	.has_overlay = 1, .overlay_needs_physical = 1,
89
	.supports_tv = 1,
4560 Serge 90
	.has_fbc = 1,
91
	.ring_mask = RENDER_RING,
5060 serge 92
	GEN_DEFAULT_PIPEOFFSETS,
93
	CURSOR_OFFSETS,
2339 Serge 94
};
95
static const struct intel_device_info intel_i945g_info = {
3746 Serge 96
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
2339 Serge 97
	.has_overlay = 1, .overlay_needs_physical = 1,
4560 Serge 98
	.ring_mask = RENDER_RING,
5060 serge 99
	GEN_DEFAULT_PIPEOFFSETS,
100
	CURSOR_OFFSETS,
2339 Serge 101
};
102
static const struct intel_device_info intel_i945gm_info = {
3746 Serge 103
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
2339 Serge 104
	.has_hotplug = 1, .cursor_needs_physical = 1,
105
	.has_overlay = 1, .overlay_needs_physical = 1,
106
	.supports_tv = 1,
4560 Serge 107
	.has_fbc = 1,
108
	.ring_mask = RENDER_RING,
5060 serge 109
	GEN_DEFAULT_PIPEOFFSETS,
110
	CURSOR_OFFSETS,
2339 Serge 111
};
112
 
113
static const struct intel_device_info intel_i965g_info = {
3746 Serge 114
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
2339 Serge 115
	.has_hotplug = 1,
116
	.has_overlay = 1,
4560 Serge 117
	.ring_mask = RENDER_RING,
5060 serge 118
	GEN_DEFAULT_PIPEOFFSETS,
119
	CURSOR_OFFSETS,
2339 Serge 120
};
121
 
122
static const struct intel_device_info intel_i965gm_info = {
3746 Serge 123
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
2339 Serge 124
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
125
	.has_overlay = 1,
126
	.supports_tv = 1,
4560 Serge 127
	.ring_mask = RENDER_RING,
5060 serge 128
	GEN_DEFAULT_PIPEOFFSETS,
129
	CURSOR_OFFSETS,
2339 Serge 130
};
131
 
132
static const struct intel_device_info intel_g33_info = {
3746 Serge 133
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
2339 Serge 134
	.need_gfx_hws = 1, .has_hotplug = 1,
135
	.has_overlay = 1,
4560 Serge 136
	.ring_mask = RENDER_RING,
5060 serge 137
	GEN_DEFAULT_PIPEOFFSETS,
138
	CURSOR_OFFSETS,
2339 Serge 139
};
140
 
141
static const struct intel_device_info intel_g45_info = {
3746 Serge 142
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
2339 Serge 143
	.has_pipe_cxsr = 1, .has_hotplug = 1,
4560 Serge 144
	.ring_mask = RENDER_RING | BSD_RING,
5060 serge 145
	GEN_DEFAULT_PIPEOFFSETS,
146
	CURSOR_OFFSETS,
2339 Serge 147
};
148
 
149
static const struct intel_device_info intel_gm45_info = {
3746 Serge 150
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
2339 Serge 151
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
152
	.has_pipe_cxsr = 1, .has_hotplug = 1,
153
	.supports_tv = 1,
4560 Serge 154
	.ring_mask = RENDER_RING | BSD_RING,
5060 serge 155
	GEN_DEFAULT_PIPEOFFSETS,
156
	CURSOR_OFFSETS,
2339 Serge 157
};
158
 
159
static const struct intel_device_info intel_pineview_info = {
3746 Serge 160
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
2339 Serge 161
	.need_gfx_hws = 1, .has_hotplug = 1,
162
	.has_overlay = 1,
5060 serge 163
	GEN_DEFAULT_PIPEOFFSETS,
164
	CURSOR_OFFSETS,
2339 Serge 165
};
166
 
167
static const struct intel_device_info intel_ironlake_d_info = {
3746 Serge 168
	.gen = 5, .num_pipes = 2,
3031 serge 169
	.need_gfx_hws = 1, .has_hotplug = 1,
4560 Serge 170
	.ring_mask = RENDER_RING | BSD_RING,
5060 serge 171
	GEN_DEFAULT_PIPEOFFSETS,
172
	CURSOR_OFFSETS,
2339 Serge 173
};
174
 
175
static const struct intel_device_info intel_ironlake_m_info = {
3746 Serge 176
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
2339 Serge 177
	.need_gfx_hws = 1, .has_hotplug = 1,
178
	.has_fbc = 1,
4560 Serge 179
	.ring_mask = RENDER_RING | BSD_RING,
5060 serge 180
	GEN_DEFAULT_PIPEOFFSETS,
181
	CURSOR_OFFSETS,
2339 Serge 182
};
183
 
2325 Serge 184
static const struct intel_device_info intel_sandybridge_d_info = {
3746 Serge 185
	.gen = 6, .num_pipes = 2,
2330 Serge 186
	.need_gfx_hws = 1, .has_hotplug = 1,
4560 Serge 187
	.has_fbc = 1,
188
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
3031 serge 189
	.has_llc = 1,
5060 serge 190
	GEN_DEFAULT_PIPEOFFSETS,
191
	CURSOR_OFFSETS,
2325 Serge 192
};
193
 
194
static const struct intel_device_info intel_sandybridge_m_info = {
3746 Serge 195
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
2330 Serge 196
	.need_gfx_hws = 1, .has_hotplug = 1,
6084 serge 197
	.has_fbc = 1,
4560 Serge 198
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
3031 serge 199
	.has_llc = 1,
5060 serge 200
	GEN_DEFAULT_PIPEOFFSETS,
201
	CURSOR_OFFSETS,
2325 Serge 202
};
203
 
3746 Serge 204
#define GEN7_FEATURES  \
205
	.gen = 7, .num_pipes = 3, \
206
	.need_gfx_hws = 1, .has_hotplug = 1, \
4560 Serge 207
	.has_fbc = 1, \
208
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
209
	.has_llc = 1
3746 Serge 210
 
2339 Serge 211
static const struct intel_device_info intel_ivybridge_d_info = {
3746 Serge 212
	GEN7_FEATURES,
213
	.is_ivybridge = 1,
5060 serge 214
	GEN_DEFAULT_PIPEOFFSETS,
215
	IVB_CURSOR_OFFSETS,
2339 Serge 216
};
2325 Serge 217
 
2339 Serge 218
static const struct intel_device_info intel_ivybridge_m_info = {
3746 Serge 219
	GEN7_FEATURES,
220
	.is_ivybridge = 1,
221
	.is_mobile = 1,
5060 serge 222
	GEN_DEFAULT_PIPEOFFSETS,
223
	IVB_CURSOR_OFFSETS,
2339 Serge 224
};
225
 
3746 Serge 226
static const struct intel_device_info intel_ivybridge_q_info = {
227
	GEN7_FEATURES,
228
	.is_ivybridge = 1,
229
	.num_pipes = 0, /* legal, last one wins */
5060 serge 230
	GEN_DEFAULT_PIPEOFFSETS,
231
	IVB_CURSOR_OFFSETS,
3746 Serge 232
};
233
 
3031 serge 234
static const struct intel_device_info intel_valleyview_m_info = {
3746 Serge 235
	GEN7_FEATURES,
236
	.is_mobile = 1,
237
	.num_pipes = 2,
3031 serge 238
	.is_valleyview = 1,
3480 Serge 239
	.display_mmio_offset = VLV_DISPLAY_BASE,
4560 Serge 240
	.has_fbc = 0, /* legal, last one wins */
3746 Serge 241
	.has_llc = 0, /* legal, last one wins */
5060 serge 242
	GEN_DEFAULT_PIPEOFFSETS,
243
	CURSOR_OFFSETS,
3031 serge 244
};
245
 
246
static const struct intel_device_info intel_valleyview_d_info = {
3746 Serge 247
	GEN7_FEATURES,
248
	.num_pipes = 2,
3031 serge 249
	.is_valleyview = 1,
3480 Serge 250
	.display_mmio_offset = VLV_DISPLAY_BASE,
4560 Serge 251
	.has_fbc = 0, /* legal, last one wins */
3746 Serge 252
	.has_llc = 0, /* legal, last one wins */
5060 serge 253
	GEN_DEFAULT_PIPEOFFSETS,
254
	CURSOR_OFFSETS,
3031 serge 255
};
256
 
257
static const struct intel_device_info intel_haswell_d_info = {
3746 Serge 258
	GEN7_FEATURES,
259
	.is_haswell = 1,
4104 Serge 260
	.has_ddi = 1,
261
	.has_fpga_dbg = 1,
4560 Serge 262
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
5060 serge 263
	GEN_DEFAULT_PIPEOFFSETS,
264
	IVB_CURSOR_OFFSETS,
3031 serge 265
};
266
 
267
static const struct intel_device_info intel_haswell_m_info = {
3746 Serge 268
	GEN7_FEATURES,
269
	.is_haswell = 1,
270
	.is_mobile = 1,
4104 Serge 271
	.has_ddi = 1,
272
	.has_fpga_dbg = 1,
4560 Serge 273
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
5060 serge 274
	GEN_DEFAULT_PIPEOFFSETS,
275
	IVB_CURSOR_OFFSETS,
3031 serge 276
};
277
 
4560 Serge 278
static const struct intel_device_info intel_broadwell_d_info = {
279
	.gen = 8, .num_pipes = 3,
280
	.need_gfx_hws = 1, .has_hotplug = 1,
281
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
282
	.has_llc = 1,
283
	.has_ddi = 1,
5060 serge 284
	.has_fpga_dbg = 1,
285
	.has_fbc = 1,
286
	GEN_DEFAULT_PIPEOFFSETS,
287
	IVB_CURSOR_OFFSETS,
4560 Serge 288
};
289
 
290
static const struct intel_device_info intel_broadwell_m_info = {
291
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
292
	.need_gfx_hws = 1, .has_hotplug = 1,
293
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
294
	.has_llc = 1,
295
	.has_ddi = 1,
5060 serge 296
	.has_fpga_dbg = 1,
297
	.has_fbc = 1,
298
	GEN_DEFAULT_PIPEOFFSETS,
299
	IVB_CURSOR_OFFSETS,
4560 Serge 300
};
301
 
5060 serge 302
static const struct intel_device_info intel_broadwell_gt3d_info = {
303
	.gen = 8, .num_pipes = 3,
304
	.need_gfx_hws = 1, .has_hotplug = 1,
305
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
306
	.has_llc = 1,
307
	.has_ddi = 1,
308
	.has_fpga_dbg = 1,
309
	.has_fbc = 1,
310
	GEN_DEFAULT_PIPEOFFSETS,
311
	IVB_CURSOR_OFFSETS,
312
};
313
 
314
static const struct intel_device_info intel_broadwell_gt3m_info = {
315
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
316
	.need_gfx_hws = 1, .has_hotplug = 1,
317
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
318
	.has_llc = 1,
319
	.has_ddi = 1,
320
	.has_fpga_dbg = 1,
321
	.has_fbc = 1,
322
	GEN_DEFAULT_PIPEOFFSETS,
323
	IVB_CURSOR_OFFSETS,
324
};
325
 
326
static const struct intel_device_info intel_cherryview_info = {
327
	.gen = 8, .num_pipes = 3,
328
	.need_gfx_hws = 1, .has_hotplug = 1,
329
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
330
	.is_valleyview = 1,
331
	.display_mmio_offset = VLV_DISPLAY_BASE,
332
	GEN_CHV_PIPEOFFSETS,
333
	CURSOR_OFFSETS,
334
};
335
 
5354 serge 336
static const struct intel_device_info intel_skylake_info = {
337
	.is_skylake = 1,
338
	.gen = 9, .num_pipes = 3,
339
	.need_gfx_hws = 1, .has_hotplug = 1,
340
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
341
	.has_llc = 1,
342
	.has_ddi = 1,
6084 serge 343
	.has_fpga_dbg = 1,
5354 serge 344
	.has_fbc = 1,
345
	GEN_DEFAULT_PIPEOFFSETS,
346
	IVB_CURSOR_OFFSETS,
347
};
348
 
6084 serge 349
static const struct intel_device_info intel_skylake_gt3_info = {
350
	.is_skylake = 1,
351
	.gen = 9, .num_pipes = 3,
352
	.need_gfx_hws = 1, .has_hotplug = 1,
353
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
354
	.has_llc = 1,
355
	.has_ddi = 1,
356
	.has_fpga_dbg = 1,
357
	.has_fbc = 1,
358
	GEN_DEFAULT_PIPEOFFSETS,
359
	IVB_CURSOR_OFFSETS,
360
};
361
 
362
static const struct intel_device_info intel_broxton_info = {
363
	.is_preliminary = 1,
364
	.gen = 9,
365
	.need_gfx_hws = 1, .has_hotplug = 1,
366
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
367
	.num_pipes = 3,
368
	.has_ddi = 1,
369
	.has_fpga_dbg = 1,
370
	.has_fbc = 1,
371
	GEN_DEFAULT_PIPEOFFSETS,
372
	IVB_CURSOR_OFFSETS,
373
};
374
 
4104 Serge 375
/*
376
 * Make sure any device matches here are from most specific to most
377
 * general.  For example, since the Quanta match is based on the subsystem
378
 * and subvendor IDs, we need it to come before the more general IVB
379
 * PCI ID matches, otherwise we'll use the wrong info struct above.
380
 */
381
#define INTEL_PCI_IDS \
382
	INTEL_I915G_IDS(&intel_i915g_info),	\
383
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
384
	INTEL_I945G_IDS(&intel_i945g_info),	\
385
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
386
	INTEL_I965G_IDS(&intel_i965g_info),	\
387
	INTEL_G33_IDS(&intel_g33_info),		\
388
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
389
	INTEL_GM45_IDS(&intel_gm45_info), 	\
390
	INTEL_G45_IDS(&intel_g45_info), 	\
391
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
392
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
393
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
394
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
395
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
396
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
397
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
398
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
399
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
400
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
401
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
4560 Serge 402
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
5060 serge 403
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
404
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
405
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
406
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
5354 serge 407
	INTEL_CHV_IDS(&intel_cherryview_info),	\
6084 serge 408
	INTEL_SKL_GT1_IDS(&intel_skylake_info),	\
409
	INTEL_SKL_GT2_IDS(&intel_skylake_info),	\
410
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),	\
411
	INTEL_BXT_IDS(&intel_broxton_info)
4104 Serge 412
 
6084 serge 413
static const struct pci_device_id pciidlist[] = {		/* aka */
4104 Serge 414
	INTEL_PCI_IDS,
6084 serge 415
	{0, 0, 0}
2325 Serge 416
};
417
 
2326 Serge 418
#define INTEL_PCH_DEVICE_ID_MASK        0xff00
419
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
420
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
421
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
3031 serge 422
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
6084 serge 423
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
424
{
425
	enum intel_pch ret = PCH_NOP;
2325 Serge 426
 
6084 serge 427
	/*
428
	 * In a virtualized passthrough environment we can be in a
429
	 * setup where the ISA bridge cannot be passed through.
430
	 * In this case, a south bridge can be emulated and we have to
431
	 * make an educated guess as to which PCH is really there.
432
	 */
433
 
434
	if (IS_GEN5(dev)) {
435
		ret = PCH_IBX;
436
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
437
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
438
		ret = PCH_CPT;
439
		DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
440
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
441
		ret = PCH_LPT;
442
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
443
	} else if (IS_SKYLAKE(dev)) {
444
		ret = PCH_SPT;
445
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
446
	}
447
 
448
	return ret;
449
}
450
 
2342 Serge 451
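/* Detect which PCH (south bridge) variant is present by scanning for Intel ISA bridge devices on the PCI bus. */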
void intel_detect_pch(struct drm_device *dev)
2326 Serge 452
{
6084 serge 453
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 454
	struct pci_dev *pch = NULL;
2326 Serge 455
 
3746 Serge 456
	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
457
	 * (which really amounts to a PCH but no South Display).
458
	 */
459
	if (INTEL_INFO(dev)->num_pipes == 0) {
460
		dev_priv->pch_type = PCH_NOP;
461
		return;
462
	}
463
 
6084 serge 464
	/*
465
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
466
	 * make graphics device passthrough work easily for the VMM, which only
467
	 * needs to expose the ISA bridge to let the driver know the real hardware
468
	 * underneath. This is a requirement from virtualization team.
4104 Serge 469
	 *
470
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
471
	 * ISA bridge in the system. To work reliably, we should scan through
472
	 * all the ISA bridge devices and check for the first match, instead
473
	 * of only checking the first one.
6084 serge 474
	 */
5060 serge 475
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
6084 serge 476
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
5060 serge 477
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
3243 Serge 478
			dev_priv->pch_id = id;
2326 Serge 479
 
6084 serge 480
			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
481
				dev_priv->pch_type = PCH_IBX;
482
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
3243 Serge 483
				WARN_ON(!IS_GEN5(dev));
6084 serge 484
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
485
				dev_priv->pch_type = PCH_CPT;
486
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
3243 Serge 487
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
6084 serge 488
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
489
				/* PantherPoint is CPT compatible */
490
				dev_priv->pch_type = PCH_CPT;
4560 Serge 491
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
3243 Serge 492
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
3031 serge 493
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
494
				dev_priv->pch_type = PCH_LPT;
495
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
6084 serge 496
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
497
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
3243 Serge 498
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
499
				dev_priv->pch_type = PCH_LPT;
500
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
6084 serge 501
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
502
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
5354 serge 503
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
504
				dev_priv->pch_type = PCH_SPT;
505
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
506
				WARN_ON(!IS_SKYLAKE(dev));
507
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
508
				dev_priv->pch_type = PCH_SPT;
509
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
510
				WARN_ON(!IS_SKYLAKE(dev));
6320 serge 511
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
512
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
513
				    pch->subsystem_vendor == 0x1af4 &&
514
				    pch->subsystem_device == 0x1100)) {
6084 serge 515
				dev_priv->pch_type = intel_virt_detect_pch(dev);
5060 serge 516
			} else
517
				continue;
518
 
4104 Serge 519
			break;
6084 serge 520
		}
521
	}
4104 Serge 522
	if (!pch)
5060 serge 523
		DRM_DEBUG_KMS("No PCH found.\n");
524
 
525
//	pci_dev_put(pch);
2326 Serge 526
}
527
 
3031 serge 528
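/* Report whether inter-ring hardware semaphores should be used; honours the i915.semaphores module parameter and disables them before gen6, on gen8, with execlists, and on SNB when IOMMU remapping is active. */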
bool i915_semaphore_is_enabled(struct drm_device *dev)
2326 Serge 529
{
3031 serge 530
	if (INTEL_INFO(dev)->gen < 6)
4560 Serge 531
		return false;
2326 Serge 532
 
5060 serge 533
	if (i915.semaphores >= 0)
534
		return i915.semaphores;
535
 
5354 serge 536
	/* TODO: make semaphores and Execlists play nicely together */
537
	if (i915.enable_execlists)
538
		return false;
539
 
4560 Serge 540
	/* Until we get further testing... */
5060 serge 541
	if (IS_GEN8(dev))
4560 Serge 542
		return false;
543
 
3031 serge 544
#ifdef CONFIG_INTEL_IOMMU
545
	/* Enable semaphores on SNB when IO remapping is off */
546
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
547
		return false;
548
#endif
2326 Serge 549
 
4560 Serge 550
	return true;
2326 Serge 551
}
552
 
4104 Serge 553
#if 0
6084 serge 554
void i915_firmware_load_error_print(const char *fw_path, int err)
555
{
556
	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
557
 
558
	/*
559
	 * If the reason is not known assume -ENOENT since that's the most
560
	 * usual failure mode.
561
	 */
562
	if (!err)
563
		err = -ENOENT;
564
 
565
	if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
566
		return;
567
 
568
	DRM_ERROR(
569
	  "The driver is built-in, so to load the firmware you need to\n"
570
	  "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
571
	  "in your initrd/initramfs image.\n");
572
}
573
 
5060 serge 574
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
575
{
576
	struct drm_device *dev = dev_priv->dev;
577
	struct drm_encoder *encoder;
578
 
579
	drm_modeset_lock_all(dev);
580
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
581
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
582
 
583
		if (intel_encoder->suspend)
584
			intel_encoder->suspend(intel_encoder);
585
	}
586
	drm_modeset_unlock_all(dev);
587
}
588
 
5354 serge 589
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
590
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
591
			      bool rpm_resume);
6084 serge 592
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
593
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
5354 serge 594
 
6084 serge 595
 
5354 serge 596
static int i915_drm_suspend(struct drm_device *dev)
4104 Serge 597
{
598
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 599
	pci_power_t opregion_target_state;
6084 serge 600
	int error;
2342 Serge 601
 
4104 Serge 602
	/* ignore lid events during suspend */
603
	mutex_lock(&dev_priv->modeset_restore_lock);
604
	dev_priv->modeset_restore = MODESET_SUSPENDED;
605
	mutex_unlock(&dev_priv->modeset_restore_lock);
2342 Serge 606
 
4104 Serge 607
	/* We do a lot of poking in a lot of registers, make sure they work
608
	 * properly. */
5060 serge 609
	intel_display_set_init_power(dev_priv, true);
2342 Serge 610
 
4104 Serge 611
	drm_kms_helper_poll_disable(dev);
2342 Serge 612
 
4104 Serge 613
	pci_save_state(dev->pdev);
2325 Serge 614
 
6084 serge 615
	error = i915_gem_suspend(dev);
616
	if (error) {
617
		dev_err(&dev->pdev->dev,
618
			"GEM idle failed, resume might fail\n");
619
		return error;
620
	}
4104 Serge 621
 
6084 serge 622
	intel_guc_suspend(dev);
4104 Serge 623
 
6084 serge 624
	intel_suspend_gt_powersave(dev);
5354 serge 625
 
6084 serge 626
	/*
627
	 * Disable CRTCs directly since we want to preserve sw state
628
	 * for _thaw. Also, power gate the CRTC power wells.
629
	 */
630
	drm_modeset_lock_all(dev);
631
	intel_display_suspend(dev);
632
	drm_modeset_unlock_all(dev);
4104 Serge 633
 
6084 serge 634
	intel_dp_mst_suspend(dev);
5060 serge 635
 
6084 serge 636
	intel_runtime_pm_disable_interrupts(dev_priv);
637
	intel_hpd_cancel_work(dev_priv);
5060 serge 638
 
6084 serge 639
	intel_suspend_encoders(dev_priv);
5060 serge 640
 
6084 serge 641
	intel_suspend_hw(dev);
4104 Serge 642
 
4560 Serge 643
	i915_gem_suspend_gtt_mappings(dev);
644
 
4104 Serge 645
	i915_save_state(dev);
646
 
5060 serge 647
	opregion_target_state = PCI_D3cold;
648
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
649
	if (acpi_target_system_state() < ACPI_STATE_S3)
650
		opregion_target_state = PCI_D1;
651
#endif
652
	intel_opregion_notify_adapter(dev, opregion_target_state);
653
 
654
	intel_uncore_forcewake_reset(dev, false);
4104 Serge 655
	intel_opregion_fini(dev);
656
 
5354 serge 657
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
4104 Serge 658
 
5060 serge 659
	dev_priv->suspend_count++;
660
 
661
	intel_display_set_init_power(dev_priv, false);
662
 
4104 Serge 663
	return 0;
664
}
665
 
6084 serge 666
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
2325 Serge 667
{
5354 serge 668
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
669
	int ret;
670
 
671
	ret = intel_suspend_complete(dev_priv);
672
 
673
	if (ret) {
674
		DRM_ERROR("Suspend complete failed: %d\n", ret);
675
 
676
		return ret;
677
	}
678
 
679
	pci_disable_device(drm_dev->pdev);
6084 serge 680
	/*
681
	 * During hibernation on some platforms the BIOS may try to access
682
	 * the device even though it's already in D3 and hang the machine. So
683
	 * leave the device in D0 on those platforms and hope the BIOS will
684
	 * power down the device properly. The issue was seen on multiple old
685
	 * GENs with different BIOS vendors, so having an explicit blacklist
686
	 * is impractical; apply the workaround on everything pre GEN6. The
687
	 * platforms where the issue was seen:
688
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
689
	 * Fujitsu FSC S7110
690
	 * Acer Aspire 1830T
691
	 */
692
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
693
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
5354 serge 694
 
695
	return 0;
696
}
697
 
6084 serge 698
int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
5354 serge 699
{
4104 Serge 700
	int error;
2325 Serge 701
 
4104 Serge 702
	if (!dev || !dev->dev_private) {
703
		DRM_ERROR("dev: %p\n", dev);
704
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
705
		return -ENODEV;
706
	}
2325 Serge 707
 
5354 serge 708
	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
709
			 state.event != PM_EVENT_FREEZE))
710
		return -EINVAL;
3031 serge 711
 
4104 Serge 712
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
713
		return 0;
2325 Serge 714
 
5354 serge 715
	error = i915_drm_suspend(dev);
4104 Serge 716
	if (error)
717
		return error;
3031 serge 718
 
6084 serge 719
	return i915_drm_suspend_late(dev, false);
4104 Serge 720
}
2325 Serge 721
 
5354 serge 722
static int i915_drm_resume(struct drm_device *dev)
4104 Serge 723
{
5060 serge 724
	struct drm_i915_private *dev_priv = dev->dev_private;
3260 Serge 725
 
6084 serge 726
	mutex_lock(&dev->struct_mutex);
727
	i915_gem_restore_gtt_mappings(dev);
728
	mutex_unlock(&dev->struct_mutex);
4560 Serge 729
 
4104 Serge 730
	i915_restore_state(dev);
731
	intel_opregion_setup(dev);
732
 
6084 serge 733
	intel_init_pch_refclk(dev);
734
	drm_mode_config_reset(dev);
4104 Serge 735
 
6084 serge 736
	/*
737
	 * Interrupts have to be enabled before any batches are run. If not the
738
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
739
	 * update/restore the context.
740
	 *
741
	 * Modeset enabling in intel_modeset_init_hw() also needs working
742
	 * interrupts.
743
	 */
744
	intel_runtime_pm_enable_interrupts(dev_priv);
4104 Serge 745
 
6084 serge 746
	mutex_lock(&dev->struct_mutex);
747
	if (i915_gem_init_hw(dev)) {
748
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
749
			atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
750
	}
751
	mutex_unlock(&dev->struct_mutex);
4104 Serge 752
 
6084 serge 753
	intel_guc_resume(dev);
4104 Serge 754
 
6084 serge 755
	intel_modeset_init_hw(dev);
5060 serge 756
 
6084 serge 757
	spin_lock_irq(&dev_priv->irq_lock);
758
	if (dev_priv->display.hpd_irq_setup)
759
		dev_priv->display.hpd_irq_setup(dev);
760
	spin_unlock_irq(&dev_priv->irq_lock);
4104 Serge 761
 
6084 serge 762
	drm_modeset_lock_all(dev);
763
	intel_display_resume(dev);
764
	drm_modeset_unlock_all(dev);
5354 serge 765
 
6084 serge 766
	intel_dp_mst_resume(dev);
4104 Serge 767
 
6084 serge 768
	/*
769
	 * ... but also need to make sure that hotplug processing
770
	 * doesn't cause havoc. Like in the driver load code we don't
771
	 * bother with the tiny race here where we might lose hotplug
772
	 * notifications.
773
	 * */
774
	intel_hpd_init(dev_priv);
775
	/* Config may have changed between suspend and resume */
776
	drm_helper_hpd_irq_event(dev);
777
 
4104 Serge 778
	intel_opregion_init(dev);
779
 
5354 serge 780
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
4104 Serge 781
 
782
	mutex_lock(&dev_priv->modeset_restore_lock);
783
	dev_priv->modeset_restore = MODESET_DONE;
784
	mutex_unlock(&dev_priv->modeset_restore_lock);
4560 Serge 785
 
5060 serge 786
	intel_opregion_notify_adapter(dev, PCI_D0);
787
 
5354 serge 788
	drm_kms_helper_poll_enable(dev);
789
 
5060 serge 790
	return 0;
4104 Serge 791
}
792
 
5354 serge 793
static int i915_drm_resume_early(struct drm_device *dev)
4104 Serge 794
{
5354 serge 795
	struct drm_i915_private *dev_priv = dev->dev_private;
796
	int ret = 0;
4104 Serge 797
 
5060 serge 798
	/*
799
	 * We have a resume ordering issue with the snd-hda driver also
800
	 * requiring our device to be powered up. Due to the lack of a
801
	 * parent/child relationship we currently solve this with an early
802
	 * resume hook.
803
	 *
804
	 * FIXME: This should be solved with a special hdmi sink device or
805
	 * similar so that power domains can be employed.
806
	 */
4104 Serge 807
	if (pci_enable_device(dev->pdev))
808
		return -EIO;
809
 
810
	pci_set_master(dev->pdev);
811
 
5354 serge 812
	if (IS_VALLEYVIEW(dev_priv))
813
		ret = vlv_resume_prepare(dev_priv, false);
814
	if (ret)
6084 serge 815
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
816
			  ret);
5354 serge 817
 
818
	intel_uncore_early_sanitize(dev, true);
819
 
6084 serge 820
	if (IS_BROXTON(dev))
821
		ret = bxt_resume_prepare(dev_priv);
822
	else if (IS_SKYLAKE(dev_priv))
823
		ret = skl_resume_prepare(dev_priv);
824
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5354 serge 825
		hsw_disable_pc8(dev_priv);
826
 
827
	intel_uncore_sanitize(dev);
828
	intel_power_domains_init_hw(dev_priv);
829
 
830
	return ret;
5060 serge 831
}
832
 
6084 serge 833
int i915_resume_switcheroo(struct drm_device *dev)
5060 serge 834
{
835
	int ret;
836
 
5354 serge 837
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
838
		return 0;
839
 
840
	ret = i915_drm_resume_early(dev);
4104 Serge 841
	if (ret)
842
		return ret;
843
 
5354 serge 844
	return i915_drm_resume(dev);
4104 Serge 845
}
846
 
847
/**
848
 * i915_reset - reset chip after a hang
849
 * @dev: drm device to reset
850
 *
851
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
852
 * reset or otherwise an error code.
853
 *
854
 * Procedure is fairly simple:
855
 *   - reset the chip using the reset reg
856
 *   - re-init context state
857
 *   - re-init hardware status page
858
 *   - re-init ring buffer
859
 *   - re-init interrupt state
860
 *   - re-init display
861
 */
862
int i915_reset(struct drm_device *dev)
863
{
5060 serge 864
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 865
	bool simulated;
866
	int ret;
867
 
6084 serge 868
	intel_reset_gt_powersave(dev);
4104 Serge 869
 
870
	mutex_lock(&dev->struct_mutex);
871
 
872
	i915_gem_reset(dev);
873
 
874
	simulated = dev_priv->gpu_error.stop_rings != 0;
875
 
6084 serge 876
	ret = intel_gpu_reset(dev);
4104 Serge 877
 
6084 serge 878
	/* Also reset the gpu hangman. */
879
	if (simulated) {
880
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
881
		dev_priv->gpu_error.stop_rings = 0;
882
		if (ret == -ENODEV) {
4560 Serge 883
			DRM_INFO("Reset not implemented, but ignoring "
6084 serge 884
				 "error for simulated gpu hangs\n");
885
			ret = 0;
886
		}
4104 Serge 887
	}
4560 Serge 888
 
5354 serge 889
	if (i915_stop_ring_allow_warn(dev_priv))
890
		pr_notice("drm/i915: Resetting chip after gpu hang\n");
891
 
4104 Serge 892
	if (ret) {
4560 Serge 893
		DRM_ERROR("Failed to reset chip: %i\n", ret);
4104 Serge 894
		mutex_unlock(&dev->struct_mutex);
895
		return ret;
896
	}
897
 
898
	/* Ok, now get things going again... */
899
 
900
	/*
901
	 * Everything depends on having the GTT running, so we need to start
902
	 * there.  Fortunately we don't need to do this unless we reset the
903
	 * chip at a PCI level.
904
	 *
905
	 * Next we need to restore the context, but we don't use those
906
	 * yet either...
907
	 *
908
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
909
	 * was running at the time of the reset (i.e. we weren't VT
910
	 * switched away).
911
	 */
912
 
6084 serge 913
	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
914
	dev_priv->gpu_error.reload_in_reset = true;
5354 serge 915
 
6084 serge 916
	ret = i915_gem_init_hw(dev);
5354 serge 917
 
6084 serge 918
	dev_priv->gpu_error.reload_in_reset = false;
4104 Serge 919
 
6084 serge 920
	mutex_unlock(&dev->struct_mutex);
921
	if (ret) {
922
		DRM_ERROR("Failed hw init on reset %d\n", ret);
923
		return ret;
4104 Serge 924
	}
925
 
6084 serge 926
	/*
927
	 * rps/rc6 re-init is necessary to restore state lost after the
928
	 * reset and the re-install of gt irqs. Skip for ironlake per
929
	 * previous concerns that it doesn't respond well to some forms
930
	 * of re-init after reset.
931
	 */
932
	if (INTEL_INFO(dev)->gen > 5)
933
		intel_enable_gt_powersave(dev);
934
 
4104 Serge 935
	return 0;
936
}
937
 
938
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
939
{
940
	struct intel_device_info *intel_info =
941
		(struct intel_device_info *) ent->driver_data;
942
 
5060 serge 943
	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
4560 Serge 944
		DRM_INFO("This hardware requires preliminary hardware support.\n"
945
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
946
		return -ENODEV;
947
	}
948
 
4104 Serge 949
	/* Only bind to function 0 of the device. Early generations
950
	 * used function 1 as a placeholder for multi-head. This causes
951
	 * us confusion instead, especially on the systems where both
952
	 * functions have the same PCI-ID!
953
	 */
954
	if (PCI_FUNC(pdev->devfn))
955
		return -ENODEV;
956
 
957
	return drm_get_pci_dev(pdev, ent, &driver);
958
}
959
 
960
static void
961
i915_pci_remove(struct pci_dev *pdev)
962
{
963
	struct drm_device *dev = pci_get_drvdata(pdev);
964
 
965
	drm_put_dev(dev);
966
}
967
 
968
static int i915_pm_suspend(struct device *dev)
969
{
970
	struct pci_dev *pdev = to_pci_dev(dev);
971
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
972
 
973
	if (!drm_dev || !drm_dev->dev_private) {
974
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
975
		return -ENODEV;
976
	}
977
 
978
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
979
		return 0;
980
 
5354 serge 981
	return i915_drm_suspend(drm_dev);
5060 serge 982
}
4104 Serge 983
 
5060 serge 984
static int i915_pm_suspend_late(struct device *dev)
985
{
6084 serge 986
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
5060 serge 987
 
988
	/*
6084 serge 989
	 * We have a suspend ordering issue with the snd-hda driver also
5060 serge 990
	 * requiring our device to be powered up. Due to the lack of a
991
	 * parent/child relationship we currently solve this with a late
992
	 * suspend hook.
993
	 *
994
	 * FIXME: This should be solved with a special hdmi sink device or
995
	 * similar so that power domains can be employed.
996
	 */
997
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
998
		return 0;
999
 
6084 serge 1000
	return i915_drm_suspend_late(drm_dev, false);
4104 Serge 1001
}
1002
 
6084 serge 1003
static int i915_pm_poweroff_late(struct device *dev)
1004
{
1005
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1006
 
1007
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1008
		return 0;
1009
 
1010
	return i915_drm_suspend_late(drm_dev, true);
1011
}
1012
 
5060 serge 1013
static int i915_pm_resume_early(struct device *dev)
1014
{
6084 serge 1015
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
5060 serge 1016
 
5354 serge 1017
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1018
		return 0;
1019
 
1020
	return i915_drm_resume_early(drm_dev);
5060 serge 1021
}
1022
 
4104 Serge 1023
static int i915_pm_resume(struct device *dev)
1024
{
6084 serge 1025
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
4104 Serge 1026
 
5354 serge 1027
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1028
		return 0;
4104 Serge 1029
 
5354 serge 1030
	return i915_drm_resume(drm_dev);
4104 Serge 1031
}
1032
 
6084 serge 1033
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
1034
{
1035
	/* Enabling DC6 is not a hard requirement to enter runtime D3 */
1036
 
1037
	skl_uninit_cdclk(dev_priv);
1038
 
1039
	return 0;
1040
}
1041
 
5354 serge 1042
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
5060 serge 1043
{
1044
	hsw_enable_pc8(dev_priv);
4560 Serge 1045
 
5060 serge 1046
	return 0;
1047
}
1048
 
6084 serge 1049
static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
1050
{
1051
	struct drm_device *dev = dev_priv->dev;
1052
 
1053
	/* TODO: when DC5 support is added disable DC5 here. */
1054
 
1055
	broxton_ddi_phy_uninit(dev);
1056
	broxton_uninit_cdclk(dev);
1057
	bxt_enable_dc9(dev_priv);
1058
 
1059
	return 0;
1060
}
1061
 
1062
static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
1063
{
1064
	struct drm_device *dev = dev_priv->dev;
1065
 
1066
	/* TODO: when CSR FW support is added make sure the FW is loaded */
1067
 
1068
	bxt_disable_dc9(dev_priv);
1069
 
1070
	/*
1071
	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
1072
	 * is available.
1073
	 */
1074
	broxton_init_cdclk(dev);
1075
	broxton_ddi_phy_init(dev);
1076
	intel_prepare_ddi(dev);
1077
 
1078
	return 0;
1079
}
1080
 
1081
static int skl_resume_prepare(struct drm_i915_private *dev_priv)
1082
{
1083
	struct drm_device *dev = dev_priv->dev;
1084
 
1085
	skl_init_cdclk(dev_priv);
1086
	intel_csr_load_program(dev);
1087
 
1088
	return 0;
1089
}
1090
 
5060 serge 1091
/*
1092
 * Save all Gunit registers that may be lost after a D3 and a subsequent
1093
 * S0i[R123] transition. The list of registers needing a save/restore is
1094
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
1095
 * registers in the following way:
1096
 * - Driver: saved/restored by the driver
1097
 * - Punit : saved/restored by the Punit firmware
1098
 * - No, w/o marking: no need to save/restore, since the register is R/O or
1099
 *                    used internally by the HW in a way that doesn't depend on
1100
 *                    keeping the content across a suspend/resume.
1101
 * - Debug : used for debugging
1102
 *
1103
 * We save/restore all registers marked with 'Driver', with the following
1104
 * exceptions:
1105
 * - Registers out of use, including also registers marked with 'Debug'.
1106
 *   These have no effect on the driver's operation, so we don't save/restore
1107
 *   them to reduce the overhead.
1108
 * - Registers that are fully setup by an initialization function called from
1109
 *   the resume path. For example many clock gating and RPS/RC6 registers.
1110
 * - Registers that provide the right functionality with their reset defaults.
1111
 *
1112
 * TODO: Except for registers that based on the above 3 criteria can be safely
1113
 * ignored, we save/restore all others, practically treating the HW context as
1114
 * a black-box for the driver. Further investigation is needed to reduce the
1115
 * saved/restored registers even further, by following the same 3 criteria.
1116
 */
1117
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1118
{
1119
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1120
	int i;
1121
 
1122
	/* GAM 0x4000-0x4770 */
1123
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
1124
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
1125
	s->arb_mode		= I915_READ(ARB_MODE);
1126
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
1127
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
1128
 
1129
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
6084 serge 1130
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
5060 serge 1131
 
1132
	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
6084 serge 1133
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
5060 serge 1134
 
1135
	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
1136
	s->ecochk		= I915_READ(GAM_ECOCHK);
1137
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
1138
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);
1139
 
1140
	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);
1141
 
1142
	/* MBC 0x9024-0x91D0, 0x8500 */
1143
	s->g3dctl		= I915_READ(VLV_G3DCTL);
1144
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
1145
	s->mbctl		= I915_READ(GEN6_MBCTL);
1146
 
1147
	/* GCP 0x9400-0x9424, 0x8100-0x810C */
1148
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
1149
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
1150
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
1151
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
1152
	s->rstctl		= I915_READ(GEN6_RSTCTL);
1153
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);
1154
 
1155
	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1156
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
1157
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
1158
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
1159
	s->ecobus		= I915_READ(ECOBUS);
1160
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
1161
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
1162
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
1163
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
1164
	s->rcedata		= I915_READ(VLV_RCEDATA);
1165
	s->spare2gh		= I915_READ(VLV_SPAREG2H);
1166
 
1167
	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1168
	s->gt_imr		= I915_READ(GTIMR);
1169
	s->gt_ier		= I915_READ(GTIER);
1170
	s->pm_imr		= I915_READ(GEN6_PMIMR);
1171
	s->pm_ier		= I915_READ(GEN6_PMIER);
1172
 
1173
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
6084 serge 1174
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
5060 serge 1175
 
1176
	/* GT SA CZ domain, 0x100000-0x138124 */
1177
	s->tilectl		= I915_READ(TILECTL);
1178
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
1179
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
1180
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1181
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
1182
 
1183
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1184
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
1185
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
6084 serge 1186
	s->pcbr			= I915_READ(VLV_PCBR);
5060 serge 1187
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
1188
 
1189
	/*
1190
	 * Not saving any of:
1191
	 * DFT,		0x9800-0x9EC0
1192
	 * SARB,	0xB000-0xB1FC
1193
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
1194
	 * PCI CFG
1195
	 */
1196
}
1197
 
1198
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1199
{
1200
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1201
	u32 val;
1202
	int i;
1203
 
1204
	/* GAM 0x4000-0x4770 */
1205
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
1206
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
1207
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
1208
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
1209
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
1210
 
1211
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
6084 serge 1212
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
5060 serge 1213
 
1214
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
6084 serge 1215
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
5060 serge 1216
 
1217
	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
1218
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
1219
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
1220
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);
1221
 
1222
	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);
1223
 
1224
	/* MBC 0x9024-0x91D0, 0x8500 */
1225
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
1226
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
1227
	I915_WRITE(GEN6_MBCTL,		s->mbctl);
1228
 
1229
	/* GCP 0x9400-0x9424, 0x8100-0x810C */
1230
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
1231
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
1232
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
1233
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
1234
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
1235
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);
1236
 
1237
	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1238
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
1239
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
1240
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
1241
	I915_WRITE(ECOBUS,		s->ecobus);
1242
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
1243
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,	s->rp_down_timeout);
1244
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
1245
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
1246
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
1247
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);
1248
 
1249
	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1250
	I915_WRITE(GTIMR,		s->gt_imr);
1251
	I915_WRITE(GTIER,		s->gt_ier);
1252
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
1253
	I915_WRITE(GEN6_PMIER,		s->pm_ier);
1254
 
1255
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
6084 serge 1256
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
5060 serge 1257
 
1258
	/* GT SA CZ domain, 0x100000-0x138124 */
1259
	I915_WRITE(TILECTL,			s->tilectl);
1260
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
1261
	/*
1262
	 * Preserve the GT allow wake and GFX force clock bit, they are not
1263
	 * to be restored, as they are used to control the s0ix suspend/resume
1264
	 * sequence by the caller.
1265
	 */
1266
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
1267
	val &= VLV_GTLC_ALLOWWAKEREQ;
1268
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1269
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1270
 
1271
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1272
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
1273
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1274
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1275
 
1276
	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
1277
 
1278
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1279
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
1280
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
6084 serge 1281
	I915_WRITE(VLV_PCBR,			s->pcbr);
5060 serge 1282
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
1283
}
4104 Serge 1284
#endif
1285
 
5060 serge 1286
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1287
{
1288
	u32 val;
1289
	int err;
1290
 
1291
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1292
 
1293
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1294
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1295
	if (force_on)
1296
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
1297
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1298
 
1299
	if (!force_on)
1300
		return 0;
1301
 
1302
	err = wait_for(COND, 20);
1303
	if (err)
1304
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1305
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1306
 
1307
	return err;
1308
#undef COND
1309
}
1310
#if 0
1311
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1312
{
1313
	u32 val;
1314
	int err = 0;
1315
 
1316
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
1317
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
1318
	if (allow)
1319
		val |= VLV_GTLC_ALLOWWAKEREQ;
1320
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1321
	POSTING_READ(VLV_GTLC_WAKE_CTRL);
1322
 
1323
#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1324
	      allow)
1325
	err = wait_for(COND, 1);
1326
	if (err)
1327
		DRM_ERROR("timeout disabling GT waking\n");
1328
	return err;
1329
#undef COND
1330
}
1331
 
1332
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1333
				 bool wait_for_on)
1334
{
1335
	u32 mask;
1336
	u32 val;
1337
	int err;
1338
 
1339
	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1340
	val = wait_for_on ? mask : 0;
1341
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1342
	if (COND)
1343
		return 0;
1344
 
1345
	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1346
			wait_for_on ? "on" : "off",
1347
			I915_READ(VLV_GTLC_PW_STATUS));
1348
 
1349
	/*
1350
	 * RC6 transitioning can be delayed up to 2 msec (see
1351
	 * valleyview_enable_rps), use 3 msec for safety.
1352
	 */
1353
	err = wait_for(COND, 3);
1354
	if (err)
1355
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
1356
			  wait_for_on ? "on" : "off");
1357
 
1358
	return err;
1359
#undef COND
1360
}
1361
 
1362
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1363
{
1364
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1365
		return;
1366
 
1367
	DRM_ERROR("GT register access while GT waking disabled\n");
1368
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1369
}
1370
 
5354 serge 1371
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
5060 serge 1372
{
1373
	u32 mask;
1374
	int err;
1375
 
1376
	/*
1377
	 * Bspec defines the following GT well on flags as debug only, so
1378
	 * don't treat them as hard failures.
1379
	 */
1380
	(void)vlv_wait_for_gt_wells(dev_priv, false);
1381
 
1382
	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1383
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1384
 
1385
	vlv_check_no_gt_access(dev_priv);
1386
 
1387
	err = vlv_force_gfx_clock(dev_priv, true);
1388
	if (err)
1389
		goto err1;
1390
 
1391
	err = vlv_allow_gt_wake(dev_priv, false);
1392
	if (err)
1393
		goto err2;
1394
 
6084 serge 1395
	if (!IS_CHERRYVIEW(dev_priv->dev))
1396
		vlv_save_gunit_s0ix_state(dev_priv);
1397
 
5060 serge 1398
	err = vlv_force_gfx_clock(dev_priv, false);
1399
	if (err)
1400
		goto err2;
1401
 
1402
	return 0;
1403
 
1404
err2:
1405
	/* For safety always re-enable waking and disable gfx clock forcing */
1406
	vlv_allow_gt_wake(dev_priv, true);
1407
err1:
1408
	vlv_force_gfx_clock(dev_priv, false);
1409
 
1410
	return err;
1411
}
1412
 
5354 serge 1413
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1414
				bool rpm_resume)
5060 serge 1415
{
1416
	struct drm_device *dev = dev_priv->dev;
1417
	int err;
1418
	int ret;
1419
 
1420
	/*
1421
	 * If any of the steps fail just try to continue, that's the best we
1422
	 * can do at this point. Return the first error code (which will also
1423
	 * leave RPM permanently disabled).
1424
	 */
1425
	ret = vlv_force_gfx_clock(dev_priv, true);
1426
 
6084 serge 1427
	if (!IS_CHERRYVIEW(dev_priv->dev))
1428
		vlv_restore_gunit_s0ix_state(dev_priv);
5060 serge 1429
 
1430
	err = vlv_allow_gt_wake(dev_priv, true);
1431
	if (!ret)
1432
		ret = err;
1433
 
1434
	err = vlv_force_gfx_clock(dev_priv, false);
1435
	if (!ret)
1436
		ret = err;
1437
 
1438
	vlv_check_no_gt_access(dev_priv);
1439
 
5354 serge 1440
	if (rpm_resume) {
6084 serge 1441
		intel_init_clock_gating(dev);
1442
		i915_gem_restore_fences(dev);
5354 serge 1443
	}
5060 serge 1444
 
1445
	return ret;
1446
}
1447
 
1448
static int intel_runtime_suspend(struct device *device)
1449
{
1450
	struct pci_dev *pdev = to_pci_dev(device);
1451
	struct drm_device *dev = pci_get_drvdata(pdev);
1452
	struct drm_i915_private *dev_priv = dev->dev_private;
1453
	int ret;
1454
 
1455
	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1456
		return -ENODEV;
1457
 
5354 serge 1458
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1459
		return -ENODEV;
1460
 
5060 serge 1461
	DRM_DEBUG_KMS("Suspending device\n");
1462
 
1463
	/*
1464
	 * We could deadlock here in case another thread holding struct_mutex
1465
	 * calls RPM suspend concurrently, since the RPM suspend will wait
1466
	 * first for this RPM suspend to finish. In this case the concurrent
1467
	 * RPM resume will be followed by its RPM suspend counterpart. Still
1468
	 * for consistency return -EAGAIN, which will reschedule this suspend.
1469
	 */
1470
	if (!mutex_trylock(&dev->struct_mutex)) {
1471
		DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
1472
		/*
1473
		 * Bump the expiration timestamp, otherwise the suspend won't
1474
		 * be rescheduled.
1475
		 */
1476
		pm_runtime_mark_last_busy(device);
1477
 
1478
		return -EAGAIN;
1479
	}
1480
	/*
1481
	 * We are safe here against re-faults, since the fault handler takes
1482
	 * an RPM reference.
1483
	 */
1484
	i915_gem_release_all_mmaps(dev_priv);
1485
	mutex_unlock(&dev->struct_mutex);
1486
 
6084 serge 1487
	intel_guc_suspend(dev);
1488
 
5354 serge 1489
	intel_suspend_gt_powersave(dev);
1490
	intel_runtime_pm_disable_interrupts(dev_priv);
5060 serge 1491
 
5354 serge 1492
	ret = intel_suspend_complete(dev_priv);
5060 serge 1493
	if (ret) {
1494
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
5354 serge 1495
		intel_runtime_pm_enable_interrupts(dev_priv);
5060 serge 1496
 
1497
		return ret;
1498
	}
1499
 
6084 serge 1500
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1501
	intel_uncore_forcewake_reset(dev, false);
5060 serge 1502
	dev_priv->pm.suspended = true;
1503
 
1504
	/*
5354 serge 1505
	 * FIXME: We really should find a document that references the arguments
1506
	 * used below!
1507
	 */
6084 serge 1508
	if (IS_BROADWELL(dev)) {
5354 serge 1509
		/*
1510
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1511
		 * being detected, and the call we do at intel_runtime_resume()
1512
		 * won't be able to restore them. Since PCI_D3hot matches the
6084 serge 1513
		 * actual specification and appears to be working, use it.
5354 serge 1514
		 */
1515
		intel_opregion_notify_adapter(dev, PCI_D3hot);
6084 serge 1516
	} else {
1517
		/*
1518
		 * current versions of firmware which depend on this opregion
1519
		 * notification have repurposed the D1 definition to mean
1520
		 * "runtime suspended" vs. what you would normally expect (D3)
1521
		 * to distinguish it from notifications that might be sent via
1522
		 * the suspend path.
1523
		 */
1524
		intel_opregion_notify_adapter(dev, PCI_D1);
5354 serge 1525
	}
5060 serge 1526
 
6084 serge 1527
	assert_forcewakes_inactive(dev_priv);
1528
 
5060 serge 1529
	DRM_DEBUG_KMS("Device suspended\n");
1530
	return 0;
1531
}
1532
 
1533
static int intel_runtime_resume(struct device *device)
1534
{
1535
	struct pci_dev *pdev = to_pci_dev(device);
1536
	struct drm_device *dev = pci_get_drvdata(pdev);
1537
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 1538
	int ret = 0;
5060 serge 1539
 
5354 serge 1540
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1541
		return -ENODEV;
5060 serge 1542
 
1543
	DRM_DEBUG_KMS("Resuming device\n");
1544
 
1545
	intel_opregion_notify_adapter(dev, PCI_D0);
1546
	dev_priv->pm.suspended = false;
1547
 
6084 serge 1548
	intel_guc_resume(dev);
1549
 
5354 serge 1550
	if (IS_GEN6(dev_priv))
1551
		intel_init_pch_refclk(dev);
6084 serge 1552
 
1553
	if (IS_BROXTON(dev))
1554
		ret = bxt_resume_prepare(dev_priv);
1555
	else if (IS_SKYLAKE(dev))
1556
		ret = skl_resume_prepare(dev_priv);
5354 serge 1557
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1558
		hsw_disable_pc8(dev_priv);
1559
	else if (IS_VALLEYVIEW(dev_priv))
1560
		ret = vlv_resume_prepare(dev_priv, true);
5060 serge 1561
 
1562
	/*
1563
	 * No point of rolling back things in case of an error, as the best
1564
	 * we can do is to hope that things will still work (and disable RPM).
1565
	 */
1566
	i915_gem_init_swizzling(dev);
1567
	gen6_update_ring_freq(dev);
1568
 
5354 serge 1569
	intel_runtime_pm_enable_interrupts(dev_priv);
6084 serge 1570
 
1571
	/*
1572
	 * On VLV/CHV display interrupts are part of the display
1573
	 * power well, so hpd is reinitialized from there. For
1574
	 * everyone else do it here.
1575
	 */
1576
	if (!IS_VALLEYVIEW(dev_priv))
1577
		intel_hpd_init(dev_priv);
1578
 
5354 serge 1579
	intel_enable_gt_powersave(dev);
5060 serge 1580
 
1581
	if (ret)
1582
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1583
	else
1584
		DRM_DEBUG_KMS("Device resumed\n");
1585
 
1586
	return ret;
1587
}
1588
 
5354 serge 1589
/*
1590
 * This function implements common functionality of runtime and system
1591
 * suspend sequence.
1592
 */
1593
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1594
{
1595
	int ret;
1596
 
6084 serge 1597
	if (IS_BROXTON(dev_priv))
1598
		ret = bxt_suspend_complete(dev_priv);
1599
	else if (IS_SKYLAKE(dev_priv))
1600
		ret = skl_suspend_complete(dev_priv);
1601
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5354 serge 1602
		ret = hsw_suspend_complete(dev_priv);
6084 serge 1603
	else if (IS_VALLEYVIEW(dev_priv))
5354 serge 1604
		ret = vlv_suspend_complete(dev_priv);
1605
	else
1606
		ret = 0;
1607
 
1608
	return ret;
1609
}
1610
 
5060 serge 1611
static const struct dev_pm_ops i915_pm_ops = {
5354 serge 1612
	/*
1613
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1614
	 * PMSG_RESUME]
1615
	 */
5060 serge 1616
	.suspend = i915_pm_suspend,
1617
	.suspend_late = i915_pm_suspend_late,
1618
	.resume_early = i915_pm_resume_early,
1619
	.resume = i915_pm_resume,
5354 serge 1620
 
1621
	/*
1622
	 * S4 event handlers
1623
	 * @freeze, @freeze_late    : called (1) before creating the
1624
	 *                            hibernation image [PMSG_FREEZE] and
1625
	 *                            (2) after rebooting, before restoring
1626
	 *                            the image [PMSG_QUIESCE]
1627
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
1628
	 *                            image, before writing it [PMSG_THAW]
1629
	 *                            and (2) after failing to create or
1630
	 *                            restore the image [PMSG_RECOVER]
1631
	 * @poweroff, @poweroff_late: called after writing the hibernation
1632
	 *                            image, before rebooting [PMSG_HIBERNATE]
1633
	 * @restore, @restore_early : called after rebooting and restoring the
1634
	 *                            hibernation image [PMSG_RESTORE]
1635
	 */
1636
	.freeze = i915_pm_suspend,
1637
	.freeze_late = i915_pm_suspend_late,
1638
	.thaw_early = i915_pm_resume_early,
1639
	.thaw = i915_pm_resume,
1640
	.poweroff = i915_pm_suspend,
6084 serge 1641
	.poweroff_late = i915_pm_poweroff_late,
5060 serge 1642
	.restore_early = i915_pm_resume_early,
1643
	.restore = i915_pm_resume,
5354 serge 1644
 
1645
	/* S0ix (via runtime suspend) event handlers */
5060 serge 1646
	.runtime_suspend = intel_runtime_suspend,
1647
	.runtime_resume = intel_runtime_resume,
1648
};
1649
 
1650
static const struct vm_operations_struct i915_gem_vm_ops = {
1651
	.fault = i915_gem_fault,
1652
	.open = drm_gem_vm_open,
1653
	.close = drm_gem_vm_close,
1654
};
1655
 
1656
static const struct file_operations i915_driver_fops = {
1657
	.owner = THIS_MODULE,
1658
	.open = drm_open,
1659
	.release = drm_release,
1660
	.unlocked_ioctl = drm_ioctl,
1661
	.mmap = drm_gem_mmap,
1662
	.poll = drm_poll,
1663
	.read = drm_read,
1664
#ifdef CONFIG_COMPAT
1665
	.compat_ioctl = i915_compat_ioctl,
1666
#endif
1667
	.llseek = noop_llseek,
1668
};
1669
#endif
1670
 
3260 Serge 1671
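/* DRM driver descriptor for the KolibriOS port: most file-operation and userspace-API hooks are stubbed out; only load, open, GEM object freeing and the optional debugfs hooks are wired up. */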
static struct drm_driver driver = {
6084 serge 1672
	/* Don't use MTRRs here; the Xserver or userspace app should
1673
	 * deal with them for Intel hardware.
1674
	 */
1675
	.driver_features =
4104 Serge 1676
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
6084 serge 1677
	    DRIVER_RENDER | DRIVER_MODESET,
1678
	.load = i915_driver_load,
3260 Serge 1679
//    .unload = i915_driver_unload,
3263 Serge 1680
      .open = i915_driver_open,
3260 Serge 1681
//    .lastclose = i915_driver_lastclose,
1682
//    .preclose = i915_driver_preclose,
1683
//    .postclose = i915_driver_postclose,
6084 serge 1684
//	.set_busid = drm_pci_set_busid,
3260 Serge 1685
 
4104 Serge 1686
#if defined(CONFIG_DEBUG_FS)
1687
	.debugfs_init = i915_debugfs_init,
1688
	.debugfs_cleanup = i915_debugfs_cleanup,
1689
#endif
3260 Serge 1690
    .gem_free_object = i915_gem_free_object,
1691
 
1692
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1693
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1694
//    .gem_prime_export = i915_gem_prime_export,
1695
//    .gem_prime_import = i915_gem_prime_import,
1696
 
1697
//    .dumb_create = i915_gem_dumb_create,
1698
//    .dumb_map_offset = i915_gem_mmap_gtt,
1699
//    .dumb_destroy = i915_gem_dumb_destroy,
1700
//    .ioctls = i915_ioctls,
1701
//    .fops = &i915_driver_fops,
1702
//    .name = DRIVER_NAME,
1703
//    .desc = DRIVER_DESC,
1704
//    .date = DRIVER_DATE,
1705
//    .major = DRIVER_MAJOR,
1706
//    .minor = DRIVER_MINOR,
1707
//    .patchlevel = DRIVER_PATCHLEVEL,
1708
};
1709
 
1710
 
3243 Serge 1711
 
3255 Serge 1712
 
4104 Serge 1713
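/* KolibriOS entry point: locate a supported Intel GPU on the PCI bus, initialise the DRM core and bind this driver to the device found. */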
int i915_init(void)
1714
{
1715
    static pci_dev_t device;
1716
    const struct pci_device_id  *ent;
1717
    int  err;
2325 Serge 1718
 
4104 Serge 1719
    ent = find_pci_device(&device, pciidlist);
1720
    if( unlikely(ent == NULL) )
1721
    {
1722
        dbgprintf("device not found\n");
1723
        return -ENODEV;
1724
    };
2325 Serge 1725
 
4104 Serge 1726
    drm_core_init();
3255 Serge 1727
 
4104 Serge 1728
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
1729
                                device.pci_dev.device);
2325 Serge 1730
 
4293 Serge 1731
    driver.driver_features |= DRIVER_MODESET;
1732
 
4104 Serge 1733
    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
3263 Serge 1734
 
4104 Serge 1735
    return err;
1736
}
2325 Serge 1737
 
2330 Serge 1738
 
6084 serge 1739
MODULE_AUTHOR("Tungsten Graphics, Inc.");
1740
MODULE_AUTHOR("Intel Corporation");
2325 Serge 1741
 
6084 serge 1742
MODULE_DESCRIPTION(DRIVER_DESC);
1743
MODULE_LICENSE("GPL and additional rights");