/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
 
#include 
#include 
#include 
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include 
#include 
#include 
#include 
#include 

#include 

#include 

static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
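
/*
 * These offset macros expand to C99 designated initializers, so a device
 * info table that uses them, e.g. intel_i915g_info below, effectively
 * contains (sketch):
 *
 *	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, ... },
 *	.trans_offsets = { TRANSCODER_A_OFFSET, ... },
 *	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET },
 *	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, ... },
 *
 * keeping the per-platform display MMIO offset tables in one place.
 */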
 
int init_display_kms(struct drm_device *dev);


extern int intel_agp_enabled;

#define PCI_VENDOR_ID_INTEL        0x8086


static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1
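
/*
 * Designated initializers may be repeated, and the last initializer for a
 * given field wins. A table can therefore start with GEN7_FEATURES and
 * still override individual fields afterwards, e.g. this hypothetical
 * table (sketch):
 *
 *	static const struct intel_device_info example_info = {
 *		GEN7_FEATURES,		// sets .has_fbc = 1, ...
 *		.has_fbc = 0,		// legal, last one wins
 *	};
 *
 * This is what the "legal, last one wins" annotations below rely on.
 */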
 
static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
	.is_skylake = 1,
	.gen = 9, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_gt3_info = {
	.is_skylake = 1,
	.gen = 9, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broxton_info = {
	.is_preliminary = 1,
	.gen = 9,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info),	\
	INTEL_G45_IDS(&intel_g45_info),	\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
	INTEL_CHV_IDS(&intel_cherryview_info),	\
	INTEL_SKL_GT1_IDS(&intel_skylake_info),	\
	INTEL_SKL_GT2_IDS(&intel_skylake_info),	\
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),	\
	INTEL_BXT_IDS(&intel_broxton_info)

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};
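
/*
 * The table is zero-terminated. In the upstream Linux driver it would be
 * exported via MODULE_DEVICE_TABLE(pci, pciidlist); on this port it is
 * instead consumed directly by find_pci_device() in i915_init() below.
 */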
 
#define INTEL_PCH_DEVICE_ID_MASK        0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for the VMM, which then
	 * only needs to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

//	pci_dev_put(pch);
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
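
/*
 * Note: i915.semaphores is a tri-state module parameter; a negative value
 * (the upstream default is -1) means "auto", which is why the explicit
 * >= 0 check above returns the user's choice before any of the
 * per-platform heuristics are applied.
 */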
 
#if 0
void i915_firmware_load_error_print(const char *fw_path, int err)
{
	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);

	/*
	 * If the reason is not known assume -ENOENT since that's the most
	 * usual failure mode.
	 */
	if (!err)
		err = -ENOENT;

	if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
		return;

	DRM_ERROR(
	  "The driver is built-in, so to load the firmware you need to\n"
	  "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
	  "in your initrd/initramfs image.\n");
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_encoder *encoder;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (intel_encoder->suspend)
			intel_encoder->suspend(intel_encoder);
	}
	drm_modeset_unlock_all(dev);
}

static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);


static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		return error;
	}

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);

	/*
	 * Disable CRTCs directly since we want to preserve sw state
	 * for _thaw. Also, power gate the CRTC power wells.
	 */
	drm_modeset_lock_all(dev);
	intel_display_suspend(dev);
	drm_modeset_unlock_all(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		opregion_target_state = PCI_D1;
#endif
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	return 0;
}
 
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int ret;

	ret = intel_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);

		return ret;
	}

	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	return 0;
}

int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	drm_modeset_lock_all(dev);
	intel_display_resume(dev);
	drm_modeset_unlock_all(dev);

	intel_dp_mst_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	return 0;
}
 
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		ret = skl_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return ret;
}

int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	intel_reset_gt_powersave(dev);

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	dev_priv->gpu_error.reload_in_reset = true;

	ret = i915_gem_init_hw(dev);

	dev_priv->gpu_error.reload_in_reset = false;

	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		return ret;
	}

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);

	return 0;
}
 
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}

static int skl_suspend_complete(struct drm_i915_private *dev_priv)
{
	/* Enabling DC6 is not a hard requirement to enter runtime D3 */

	skl_uninit_cdclk(dev_priv);

	return 0;
}

static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when DC5 support is added disable DC5 here. */

	broxton_ddi_phy_uninit(dev);
	broxton_uninit_cdclk(dev);
	bxt_enable_dc9(dev_priv);

	return 0;
}

static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when CSR FW support is added make sure the FW is loaded */

	bxt_disable_dc9(dev_priv);

	/*
	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
	 * is available.
	 */
	broxton_init_cdclk(dev);
	broxton_ddi_phy_init(dev);
	intel_prepare_ddi(dev);

	return 0;
}

static int skl_resume_prepare(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	skl_init_cdclk(dev_priv);
	intel_csr_load_program(dev);

	return 0;
}
 
/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully set up by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that, based on the above 3 criteria, can be
 * safely ignored, we save/restore all others, practically treating the HW
 * context as a black box for the driver. Further investigation is needed to
 * reduce the saved/restored registers even further, by following the same 3
 * criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->pcbr			= I915_READ(VLV_PCBR);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}
 
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit; they are not
	 * to be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_PCBR,			s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}
#endif

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}
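
/*
 * Typical usage is to bracket Gunit register access with a forced-on GFX
 * clock, roughly:
 *
 *	vlv_force_gfx_clock(dev_priv, true);
 *	... save or restore Gunit state ...
 *	vlv_force_gfx_clock(dev_priv, false);
 *
 * as done by vlv_suspend_complete() and vlv_resume_prepare() below
 * (compiled out on this port).
 */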
#if 0
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
			wait_for_on ? "on" : "off",
			I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_ERROR("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}
 
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		return ret;
	}

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	intel_uncore_forcewake_reset(dev, false);
	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_SKYLAKE(dev))
		ret = skl_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements the common functionality of the runtime and
 * system suspend sequences.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		ret = skl_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
 
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
#endif

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET,
	.load = i915_driver_load,
//	.unload = i915_driver_unload,
	.open = i915_driver_open,
//	.lastclose = i915_driver_lastclose,
//	.preclose = i915_driver_preclose,
//	.postclose = i915_driver_postclose,
//	.set_busid = drm_pci_set_busid,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,

//	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
//	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
//	.gem_prime_export = i915_gem_prime_export,
//	.gem_prime_import = i915_gem_prime_import,

//	.dumb_create = i915_gem_dumb_create,
//	.dumb_map_offset = i915_gem_mmap_gtt,
//	.dumb_destroy = i915_gem_dumb_destroy,
//	.ioctls = i915_ioctls,
//	.fops = &i915_driver_fops,
//	.name = DRIVER_NAME,
//	.desc = DRIVER_DESC,
//	.date = DRIVER_DATE,
//	.major = DRIVER_MAJOR,
//	.minor = DRIVER_MINOR,
//	.patchlevel = DRIVER_PATCHLEVEL,
};

int i915_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id  *ent;
    int  err;

    ent = find_pci_device(&device, pciidlist);
    if( unlikely(ent == NULL) )
    {
        dbgprintf("device not found\n");
        return -ENODEV;
    };

    drm_core_init();

    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    driver.driver_features |= DRIVER_MODESET;

    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);

    return err;
}
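
/*
 * On this KolibriOS port the entry point above replaces the upstream
 * module_init()/PCI driver registration: the device is located with a
 * direct bus scan (find_pci_device) and handed to drm_get_pci_dev() with
 * the same driver object the Linux driver would register.
 */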
 
 
MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");