Subversion Repositories: KolibriOS

Diff of Rev 6660 against Rev 6937

Line 35... Line 35...
  #include "i915_trace.h"
  #include "intel_drv.h"

Line 37... Line 37...
  
  #include
- #include
- #include
- #include
- #include
- 
+ #include

Line 44... Line 40...
  #include

Line -... Line 41...
+ 
+ #include
+ 
+ int init_display_kms(struct drm_device *dev);
  

Line 46... Line 46...
- #include
+ extern int intel_agp_enabled;
  
  static struct drm_driver driver;

Line 66... Line 66...
  	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

Line 67... Line 67...
  
  #define IVB_CURSOR_OFFSETS \

Line 69... Line -...
- 	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
- 
- int init_display_kms(struct drm_device *dev);
- 

Line 73... Line -...
- 

Line 74... Line 69...
- extern int intel_agp_enabled;
+ 	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
  
- #define PCI_VENDOR_ID_INTEL        0x8086
+ 
Line 205... Line 200...
  #define GEN7_FEATURES  \
  	.gen = 7, .num_pipes = 3, \
  	.need_gfx_hws = 1, .has_hotplug = 1, \
  	.has_fbc = 1, \
  	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
- 	.has_llc = 1
+ 	.has_llc = 1, \
+ 	GEN_DEFAULT_PIPEOFFSETS, \
+ 	IVB_CURSOR_OFFSETS

Line 211... Line 208...
  
  static const struct intel_device_info intel_ivybridge_d_info = {
  	GEN7_FEATURES,
- 	.is_ivybridge = 1,
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	IVB_CURSOR_OFFSETS,
+ 	.is_ivybridge = 1,

Line 217... Line 212...
  };
  
  static const struct intel_device_info intel_ivybridge_m_info = {
  	GEN7_FEATURES,
- 	.is_ivybridge = 1,
- 	.is_mobile = 1,
- 	GEN_DEFAULT_PIPEOFFSETS,
+ 	.is_ivybridge = 1,

Line 224... Line 217...
- 	IVB_CURSOR_OFFSETS,
+ 	.is_mobile = 1,
  };
  
  static const struct intel_device_info intel_ivybridge_q_info = {
- 	GEN7_FEATURES,
- 	.is_ivybridge = 1,
- 	.num_pipes = 0, /* legal, last one wins */
+ 	GEN7_FEATURES,

Line -... Line 222...
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	IVB_CURSOR_OFFSETS,
- };
- 
- static const struct intel_device_info intel_valleyview_m_info = {
- 	GEN7_FEATURES,
- 	.is_mobile = 1,
- 	.num_pipes = 2,
- 	.is_valleyview = 1,
- 	.display_mmio_offset = VLV_DISPLAY_BASE,
- 	.has_fbc = 0, /* legal, last one wins */
+ 	.is_ivybridge = 1,
+ 	.num_pipes = 0, /* legal, last one wins */
+ };
+ 
+ #define VLV_FEATURES  \
+ 	.gen = 7, .num_pipes = 2, \
+ 	.need_gfx_hws = 1, .has_hotplug = 1, \
+ 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ 	.display_mmio_offset = VLV_DISPLAY_BASE, \
+ 	GEN_DEFAULT_PIPEOFFSETS, \
+ 	CURSOR_OFFSETS
+ 
+ static const struct intel_device_info intel_valleyview_m_info = {

Line 242... Line 235...
- 	.has_llc = 0, /* legal, last one wins */
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	CURSOR_OFFSETS,
- };
- 
- static const struct intel_device_info intel_valleyview_d_info = {
- 	GEN7_FEATURES,
- 	.num_pipes = 2,
- 	.is_valleyview = 1,
- 	.display_mmio_offset = VLV_DISPLAY_BASE,
+ 	VLV_FEATURES,
+ 	.is_valleyview = 1,
+ 	.is_mobile = 1,
+ };

Line -... Line 239...
- 	.has_fbc = 0, /* legal, last one wins */
- 	.has_llc = 0, /* legal, last one wins */
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	CURSOR_OFFSETS,
- };
- 
- static const struct intel_device_info intel_haswell_d_info = {
- 	GEN7_FEATURES,
- 	.is_haswell = 1,
+ 
+ static const struct intel_device_info intel_valleyview_d_info = {
+ 	VLV_FEATURES,
+ 	.is_valleyview = 1,
+ };
+ 
+ #define HSW_FEATURES  \
+ 	GEN7_FEATURES, \
+ 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ 	.has_ddi = 1, \

Line 261... Line 249...
- 	.has_ddi = 1,
- 	.has_fpga_dbg = 1,
- 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	IVB_CURSOR_OFFSETS,
- };
- 
- static const struct intel_device_info intel_haswell_m_info = {
- 	GEN7_FEATURES,
+ 	.has_fpga_dbg = 1
+ 
+ static const struct intel_device_info intel_haswell_d_info = {
+ 	HSW_FEATURES,
  	.is_haswell = 1,

Line 271... Line 254...
- 	.is_mobile = 1,
- 	.has_ddi = 1,
- 	.has_fpga_dbg = 1,
- 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	IVB_CURSOR_OFFSETS,
- };
- 
- static const struct intel_device_info intel_broadwell_d_info = {
- 	.gen = 8, .num_pipes = 3,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
+ };
+ 
+ static const struct intel_device_info intel_haswell_m_info = {
+ 	HSW_FEATURES,

Line 282... Line 258...
- 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- 	.has_llc = 1,
- 	.has_ddi = 1,
- 	.has_fpga_dbg = 1,
- 	.has_fbc = 1,
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	IVB_CURSOR_OFFSETS,
- };
- 
- static const struct intel_device_info intel_broadwell_m_info = {
- 	.gen = 8, .is_mobile = 1, .num_pipes = 3,
+ 	.is_haswell = 1,
+ 	.is_mobile = 1,
+ };
+ 

Line 293... Line 262...
- 	.need_gfx_hws = 1, .has_hotplug = 1,
- 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- 	.has_llc = 1,
- 	.has_ddi = 1,
- 	.has_fpga_dbg = 1,
- 	.has_fbc = 1,
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	IVB_CURSOR_OFFSETS,
- };
- 
- static const struct intel_device_info intel_broadwell_gt3d_info = {
+ static const struct intel_device_info intel_broadwell_d_info = {
+ 	HSW_FEATURES,
+ 	.gen = 8,
+ };
+ 

Line 304... Line 267...
- 	.gen = 8, .num_pipes = 3,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
- 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
- 	.has_llc = 1,
- 	.has_ddi = 1,
- 	.has_fpga_dbg = 1,
- 	.has_fbc = 1,
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	IVB_CURSOR_OFFSETS,
- };
+ static const struct intel_device_info intel_broadwell_m_info = {
+ 	HSW_FEATURES,
+ 	.gen = 8, .is_mobile = 1,
+ };
  

Line 315... Line 272...
- static const struct intel_device_info intel_broadwell_gt3m_info = {
- 	.gen = 8, .is_mobile = 1, .num_pipes = 3,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
+ static const struct intel_device_info intel_broadwell_gt3d_info = {
+ 	HSW_FEATURES,
+ 	.gen = 8,
  	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
- 	.has_llc = 1,
- 	.has_ddi = 1,
- 	.has_fpga_dbg = 1,
- 	.has_fbc = 1,
- 	GEN_DEFAULT_PIPEOFFSETS,
+ };
+ 
+ static const struct intel_device_info intel_broadwell_gt3m_info = {
+ 	HSW_FEATURES,
+ 	.gen = 8, .is_mobile = 1,

Line 324... Line 281...
- 	IVB_CURSOR_OFFSETS,
- };
- 
- static const struct intel_device_info intel_cherryview_info = {
- 	.gen = 8, .num_pipes = 3,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
- 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- 	.is_valleyview = 1,
- 	.display_mmio_offset = VLV_DISPLAY_BASE,
- 	GEN_CHV_PIPEOFFSETS,
- 	CURSOR_OFFSETS,
- };
+ 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ };
+ 
+ static const struct intel_device_info intel_cherryview_info = {
+ 	.gen = 8, .num_pipes = 3,

Line 336... Line 286...
- 
- static const struct intel_device_info intel_skylake_info = {
- 	.is_skylake = 1,
- 	.gen = 9, .num_pipes = 3,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
- 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- 	.has_llc = 1,
- 	.has_ddi = 1,
- 	.has_fpga_dbg = 1,
- 	.has_fbc = 1,
- 	GEN_DEFAULT_PIPEOFFSETS,
- 	IVB_CURSOR_OFFSETS,
+ 	.need_gfx_hws = 1, .has_hotplug = 1,
+ 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ 	.is_cherryview = 1,
+ 	.display_mmio_offset = VLV_DISPLAY_BASE,
+ 	GEN_CHV_PIPEOFFSETS,
+ 	CURSOR_OFFSETS,

Line 348... Line 292...
  };
  
- static const struct intel_device_info intel_skylake_gt3_info = {
+ static const struct intel_device_info intel_skylake_info = {
+ 	HSW_FEATURES,
  	.is_skylake = 1,
- 	.gen = 9, .num_pipes = 3,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
- 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ 	.gen = 9,
+ };
+ 

Line 371... Line 316...
  	.has_fbc = 1,
  	GEN_DEFAULT_PIPEOFFSETS,
  	IVB_CURSOR_OFFSETS,
  };

Line -... Line 320...
+ 
+ static const struct intel_device_info intel_kabylake_info = {
+ 	HSW_FEATURES,
+ 	.is_preliminary = 1,
+ 	.is_kabylake = 1,
+ 	.gen = 9,
+ };
+ 
+ static const struct intel_device_info intel_kabylake_gt3_info = {
+ 	HSW_FEATURES,
+ 	.is_preliminary = 1,
+ 	.is_kabylake = 1,
+ 	.gen = 9,
+ 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ };
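The hunks above are the core refactor in this revision: common designated initializers move into shared feature macros (GEN7_FEATURES grows the pipe/cursor offsets; VLV_FEATURES and HSW_FEATURES are new), and each intel_device_info initializer then lists only its deltas. The pattern depends on a C99 rule: when the same member appears twice in an initializer list, the later designated initializer overrides the earlier one, which is what the /* legal, last one wins */ comments refer to. A minimal self-contained sketch of the idea, with an illustrative struct rather than the driver's intel_device_info:

    #include <stdio.h>

    struct chip_info {
            int gen;
            int num_pipes;
            int has_fbc;
    };

    /* Shared baseline, expanded inside each initializer list,
     * in the spirit of GEN7_FEATURES. */
    #define GEN7_BASE \
            .gen = 7, .num_pipes = 3, .has_fbc = 1

    static const struct chip_info q_variant = {
            GEN7_BASE,
            .num_pipes = 0, /* legal, last one wins */
    };

    int main(void)
    {
            /* Prints: gen=7 pipes=0 fbc=1 -- the override took effect. */
            printf("gen=%d pipes=%d fbc=%d\n",
                   q_variant.gen, q_variant.num_pipes, q_variant.has_fbc);
            return 0;
    }

GCC flags the duplicate initializer under -Woverride-init, but the behavior is well defined, so the "legal" in the driver's comment is accurate.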
  
  /*
   * Make sure any device matches here are from most specific to most
   * general.  For example, since the Quanta match is based on the subsystem
   * and subvendor IDs, we need it to come before the more general IVB
   * PCI ID matches, otherwise we'll use the wrong info struct above.
   */
- #define INTEL_PCI_IDS \
- 	INTEL_I915G_IDS(&intel_i915g_info),	\
- 	INTEL_I915GM_IDS(&intel_i915gm_info),	\
- 	INTEL_I945G_IDS(&intel_i945g_info),	\
- 	INTEL_I945GM_IDS(&intel_i945gm_info),	\
- 	INTEL_I965G_IDS(&intel_i965g_info),	\
- 	INTEL_G33_IDS(&intel_g33_info),		\
- 	INTEL_I965GM_IDS(&intel_i965gm_info),	\
- 	INTEL_GM45_IDS(&intel_gm45_info),	\
- 	INTEL_G45_IDS(&intel_g45_info),	\
- 	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
- 	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
- 	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
- 	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
- 	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
- 	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
- 	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
- 	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
- 	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
- 	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
- 	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
- 	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
- 	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
- 	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
- 	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
- 	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
- 	INTEL_CHV_IDS(&intel_cherryview_info),	\
- 	INTEL_SKL_GT1_IDS(&intel_skylake_info),	\
- 	INTEL_SKL_GT2_IDS(&intel_skylake_info),	\
- 	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),	\
- 	INTEL_BXT_IDS(&intel_broxton_info)
- 
- static const struct pci_device_id pciidlist[] = {		/* aka */
- 	INTEL_PCI_IDS,
+ static const struct pci_device_id pciidlist[] = {
+ 	INTEL_I915G_IDS(&intel_i915g_info),
+ 	INTEL_I915GM_IDS(&intel_i915gm_info),
+ 	INTEL_I945G_IDS(&intel_i945g_info),
+ 	INTEL_I945GM_IDS(&intel_i945gm_info),
+ 	INTEL_I965G_IDS(&intel_i965g_info),
+ 	INTEL_G33_IDS(&intel_g33_info),
+ 	INTEL_I965GM_IDS(&intel_i965gm_info),
+ 	INTEL_GM45_IDS(&intel_gm45_info),
+ 	INTEL_G45_IDS(&intel_g45_info),
+ 	INTEL_PINEVIEW_IDS(&intel_pineview_info),
+ 	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
+ 	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
+ 	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
+ 	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
+ 	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
+ 	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
+ 	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
+ 	INTEL_HSW_D_IDS(&intel_haswell_d_info),
+ 	INTEL_HSW_M_IDS(&intel_haswell_m_info),
+ 	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
+ 	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
+ 	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
+ 	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
+ 	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
+ 	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+ 	INTEL_CHV_IDS(&intel_cherryview_info),
+ 	INTEL_SKL_GT1_IDS(&intel_skylake_info),
+ 	INTEL_SKL_GT2_IDS(&intel_skylake_info),
+ 	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
+ 	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
+ 	INTEL_BXT_IDS(&intel_broxton_info),
+ 	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
+ 	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
+ 	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
+ 	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
  	{0, 0, 0}
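Besides adding the Skylake GT4, Broxton and Kabylake IDs, the rewrite above builds pciidlist directly instead of routing the entries through an INTEL_PCI_IDS macro. The comment about ordering is the invariant to keep in mind: the table is scanned front to back, the first matching entry supplies the device-info pointer, and {0, 0, 0} terminates the scan, which is why the subsystem-specific Quanta entry (intel_ivybridge_q_info) must sit before the generic IVB entries. A small sketch of first-match-wins lookup, using a simplified entry type and made-up IDs rather than the kernel's struct pci_device_id:

    #include <stdio.h>

    /* Simplified stand-in for struct pci_device_id: 0 acts as a wildcard. */
    struct id_entry {
            unsigned short device;
            unsigned short subvendor;   /* 0 = match any */
            const char *info;           /* stand-in for the intel_device_info pointer */
    };

    static const struct id_entry idlist[] = {
            { 0x016a, 0x152d, "ivybridge_q_info" }, /* specific: must come first */
            { 0x016a, 0,      "ivybridge_d_info" }, /* generic IVB */
            { 0, 0, NULL }                          /* like the {0, 0, 0} terminator */
    };

    static const char *match(unsigned short device, unsigned short subvendor)
    {
            const struct id_entry *e;

            for (e = idlist; e->info; e++)  /* first match wins */
                    if (e->device == device &&
                        (!e->subvendor || e->subvendor == subvendor))
                            return e->info;
            return NULL;
    }

    int main(void)
    {
            printf("%s\n", match(0x016a, 0x152d)); /* ivybridge_q_info */
            printf("%s\n", match(0x016a, 0x8086)); /* ivybridge_d_info */
            return 0;
    }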
Line 417... Line -...
- };
- 
- #define INTEL_PCH_DEVICE_ID_MASK        0xff00
- #define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
- #define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
- #define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
- #define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
+ };
+ 
+ 
+ 
  static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)

Line 425... Line 384...
  {

Line 439... Line 398...
  		ret = PCH_CPT;
  		DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
  	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  		ret = PCH_LPT;
  		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
- 	} else if (IS_SKYLAKE(dev)) {
+ 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
  		ret = PCH_SPT;
  		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
  	}

Line 448... Line 407...
  

Line 502... Line 461...
  				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
  				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
  			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
  				dev_priv->pch_type = PCH_SPT;
  				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- 				WARN_ON(!IS_SKYLAKE(dev));
+ 				WARN_ON(!IS_SKYLAKE(dev) &&
+ 					!IS_KABYLAKE(dev));
  			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
  				dev_priv->pch_type = PCH_SPT;
  				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- 				WARN_ON(!IS_SKYLAKE(dev));
+ 				WARN_ON(!IS_SKYLAKE(dev) &&
+ 					!IS_KABYLAKE(dev));
  			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
  				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
  				    pch->subsystem_vendor == 0x1af4 &&
  				    pch->subsystem_device == 0x1100)) {
  				dev_priv->pch_type = intel_virt_detect_pch(dev);

Line 550... Line 511...
  
  	return true;

Line 552... Line 513...
  }
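The defines removed at the top of this section spell out how PCH detection works: only the high byte of the ISA bridge's PCI device ID identifies the PCH family (Ibex Peak 0x3bxx, CougarPoint 0x1cxx, PantherPoint 0x1exx, LynxPoint 0x8cxx), so the detection code masks the ID with 0xff00 before comparing it against the per-family type values, as the id == INTEL_PCH_*_DEVICE_ID_TYPE tests above rely on. A standalone sketch of that classification, reusing the mask and type values from the deleted defines:

    #include <stdio.h>

    #define INTEL_PCH_DEVICE_ID_MASK        0xff00
    #define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
    #define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
    #define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
    #define INTEL_PCH_LPT_DEVICE_ID_TYPE    0x8c00

    static const char *pch_name(unsigned short pci_device)
    {
            /* Only the high byte identifies the PCH family; the low byte
             * distinguishes individual SKUs within that family. */
            switch (pci_device & INTEL_PCH_DEVICE_ID_MASK) {
            case INTEL_PCH_IBX_DEVICE_ID_TYPE: return "Ibex Peak";
            case INTEL_PCH_CPT_DEVICE_ID_TYPE: return "CougarPoint";
            case INTEL_PCH_PPT_DEVICE_ID_TYPE: return "PantherPoint";
            case INTEL_PCH_LPT_DEVICE_ID_TYPE: return "LynxPoint";
            default:                           return "unknown";
            }
    }

    int main(void)
    {
            printf("%s\n", pch_name(0x1c46)); /* any 0x1cxx SKU -> CougarPoint */
            return 0;
    }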
- 
- #if 0
- void i915_firmware_load_error_print(const char *fw_path, int err)
- {
- 	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
- 
- 	/*
- 	 * If the reason is not known assume -ENOENT since that's the most
- 	 * usual failure mode.
- 	 */
- 	if (!err)
- 		err = -ENOENT;
- 
- 	if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
- 		return;
- 
- 	DRM_ERROR(
- 	  "The driver is built-in, so to load the firmware you need to\n"
- 	  "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"

Line 572... Line 514...
- 	  "in your initrd/initramfs image.\n");
- }
+ 
+ #if 0
  
  static void intel_suspend_encoders(struct drm_i915_private *dev_priv)

Line 576... Line 518...
  {
  	struct drm_device *dev = dev_priv->dev;
- 	struct drm_encoder *encoder;
- 
- 	drm_modeset_lock_all(dev);
- 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- 
- 		if (intel_encoder->suspend)
+ 	struct intel_encoder *encoder;
+ 
+ 	drm_modeset_lock_all(dev);
+ 	for_each_intel_encoder(dev, encoder)

Line 585... Line 524...
- 			intel_encoder->suspend(intel_encoder);
- 	}
+ 		if (encoder->suspend)
+ 			encoder->suspend(encoder);
  	drm_modeset_unlock_all(dev);
- }
- 
+ }
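intel_suspend_encoders() shows the shape of this change well: the open-coded list_for_each_entry() walk over dev->mode_config.encoder_list plus a to_intel_encoder() upcast collapses into the single for_each_intel_encoder() helper, and the loop variable becomes the derived type directly. A user-space sketch of how such a wrapper macro can hide both the traversal and the upcast; the types and macro body here are simplified stand-ins, not the DRM definitions:

    #include <stdio.h>
    #include <stddef.h>

    struct encoder {                        /* stand-in for drm_encoder */
            struct encoder *next;
    };

    struct intel_encoder {                  /* stand-in for intel_encoder */
            struct encoder base;
            void (*suspend)(struct intel_encoder *);
    };

    /* Upcast from the embedded base struct, like to_intel_encoder(). */
    #define to_intel_encoder(e) \
            ((struct intel_encoder *)((char *)(e) - offsetof(struct intel_encoder, base)))

    /* Iteration helper hiding both the list walk and the upcast,
     * in the spirit of for_each_intel_encoder(). */
    #define for_each_intel_encoder(head, enc) \
            for (struct encoder *it_ = (head); \
                 it_ && ((enc) = to_intel_encoder(it_), 1); \
                 it_ = it_->next)

    static void say_suspend(struct intel_encoder *enc)
    {
            printf("suspending %p\n", (void *)enc);
    }

    int main(void)
    {
            struct intel_encoder a = { .suspend = say_suspend };
            struct intel_encoder b = { .suspend = say_suspend };
            struct intel_encoder *enc;

            a.base.next = &b.base;
            b.base.next = NULL;

            for_each_intel_encoder(&a.base, enc)
                    if (enc->suspend)
                            enc->suspend(enc);
            return 0;
    }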
Line -... Line 528...
+ 
+ static int intel_suspend_complete(struct drm_i915_private *dev_priv);
+ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+ 			      bool rpm_resume);
+ static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+ 
+ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
+ {

Line 590... Line 536...
- static int intel_suspend_complete(struct drm_i915_private *dev_priv);
- static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
- 			      bool rpm_resume);
- static int skl_resume_prepare(struct drm_i915_private *dev_priv);
+ #if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ 	if (acpi_target_system_state() < ACPI_STATE_S3)
+ 		return true;
+ #endif

Line 603... Line 549...
  	/* ignore lid events during suspend */
  	mutex_lock(&dev_priv->modeset_restore_lock);
  	dev_priv->modeset_restore = MODESET_SUSPENDED;
  	mutex_unlock(&dev_priv->modeset_restore_lock);

Line -... Line 553...
+ 
+ 	disable_rpm_wakeref_asserts(dev_priv);
  
  	/* We do a lot of poking in a lot of registers, make sure they work
  	 * properly. */

Line 610... Line 558...
  	intel_display_set_init_power(dev_priv, true);

Line 615... Line 563...
  
  	error = i915_gem_suspend(dev);
  	if (error) {
  		dev_err(&dev->pdev->dev,
  			"GEM idle failed, resume might fail\n");
- 		return error;
+ 		goto out;

Line 621... Line 569...
  	}

Line 622... Line 570...
  

Line 643... Line 591...
  

Line 644... Line 592...
  	i915_gem_suspend_gtt_mappings(dev);

Line 645... Line 593...
  
- 	i915_save_state(dev);
- 
- 	opregion_target_state = PCI_D3cold;
- #if IS_ENABLED(CONFIG_ACPI_SLEEP)
- 	if (acpi_target_system_state() < ACPI_STATE_S3)
+ 	i915_save_state(dev);

Line 651... Line 595...
- 		opregion_target_state = PCI_D1;
- #endif
+ 
+ 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;

Line 659... Line 603...
  

Line 660... Line 604...
  	dev_priv->suspend_count++;

Line -... Line 605...
- 
- 	intel_display_set_init_power(dev_priv, false);
+ 
+ 	intel_display_set_init_power(dev_priv, false);
+ 
+ 	if (HAS_CSR(dev_priv))
+ 		flush_work(&dev_priv->csr.work);
+ 
+ out:
+ 	enable_rpm_wakeref_asserts(dev_priv);

Line 663... Line 613...
  
- 	return 0;
+ 	return error;
  }
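Two related changes thread through the suspend path above: the early return in the GEM-idle error branch becomes goto out, giving the function a single exit, and the body is bracketed by disable_rpm_wakeref_asserts()/enable_rpm_wakeref_asserts() so the bracket is balanced on both the success and the error path. A minimal sketch of that single-exit bracket pattern, with hypothetical stand-ins for the bracket and the failing step:

    #include <stdio.h>

    static int asserts_disabled;    /* stand-in for the rpm wakeref assert state */

    static void disable_asserts(void) { asserts_disabled++; }
    static void enable_asserts(void)  { asserts_disabled--; }

    static int gem_suspend(int fail)  { return fail ? -1 : 0; } /* hypothetical step */

    static int drm_suspend(int fail)
    {
            int error;

            disable_asserts();      /* taken unconditionally on entry... */

            error = gem_suspend(fail);
            if (error) {
                    fprintf(stderr, "GEM idle failed, resume might fail\n");
                    goto out;       /* single exit: no bare return past this point */
            }

            /* ... more suspend steps ... */

    out:
            enable_asserts();       /* ...and always released on the way out */
            return error;
    }

    int main(void)
    {
            printf("ok path: %d (balance %d)\n", drm_suspend(0), asserts_disabled);
            printf("err path: %d (balance %d)\n", drm_suspend(1), asserts_disabled);
            return 0;
    }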
- 
+ 
+ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)

Line -... Line 618...
- static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+ {
+ 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ 	bool fw_csr;
+ 	int ret;
+ 
+ 	disable_rpm_wakeref_asserts(dev_priv);
+ 
+ 	fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+ 	/*
+ 	 * In case of firmware assisted context save/restore don't manually
+ 	 * deinit the power domains. This also means the CSR/DMC firmware will
+ 	 * stay active, it will power down any HW resources as required and
+ 	 * also enable deeper system power states that would be blocked if the
+ 	 * firmware was inactive.

Line 668... Line 632...
- {
- 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ 	 */
+ 	if (!fw_csr)
+ 		intel_power_domains_suspend(dev_priv);
+ 

Line 670... Line 636...
- 	int ret;
+ 	ret = intel_suspend_complete(dev_priv);
  

Line 672... Line 638...
- 	ret = intel_suspend_complete(dev_priv);
- 
- 	if (ret) {
+ 	if (ret) {
+ 		DRM_ERROR("Suspend complete failed: %d\n", ret);
+ 		if (!fw_csr)

Line 691... Line 657...
  	 * Acer Aspire 1830T
  	 */
  	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
  		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

Line -... Line 661...
+ 
+ 	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
+ 
+ out:
+ 	enable_rpm_wakeref_asserts(dev_priv);
  
- 	return 0;
+ 	return ret;

Line 697... Line 668...
  }
  
  int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)

Line 722... Line 693...
  
  static int i915_drm_resume(struct drm_device *dev)
  {

Line -... Line 696...
- 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+ 	disable_rpm_wakeref_asserts(dev_priv);
  
  	mutex_lock(&dev->struct_mutex);

Line 728... Line 701...
  	i915_gem_restore_gtt_mappings(dev);

Line 786... Line 759...
  

Line 787... Line 760...
  	intel_opregion_notify_adapter(dev, PCI_D0);

Line -... Line 761...
+ 
+ 	drm_kms_helper_poll_enable(dev);
  
- 	drm_kms_helper_poll_enable(dev);
+ 	enable_rpm_wakeref_asserts(dev_priv);

Line 790... Line 765...
  
  	return 0;
  }
  

Line 794... Line 769...
  static int i915_drm_resume_early(struct drm_device *dev)
  {
  	struct drm_i915_private *dev_priv = dev->dev_private;
- 	int ret = 0;
+ 	int ret;
  
  	/*
  	 * We have a resume ordering issue with the snd-hda driver also
  	 * requiring our device to be power up. Due to the lack of a
  	 * parent/child relationship we currently solve this with an early
- 	 * resume hook.
- 	 *
+ 	 * resume hook.
+ 	 *
+ 	 * FIXME: This should be solved with a special hdmi sink device or
+ 	 * similar so that power domains can be employed.
+ 	 */
+ 
+ 	/*
+ 	 * Note that we need to set the power state explicitly, since we
+ 	 * powered off the device during freeze and the PCI core won't power
+ 	 * it back up for us during thaw. Powering off the device during
+ 	 * freeze is not a hard requirement though, and during the
+ 	 * suspend/resume phases the PCI core makes sure we get here with the
+ 	 * device powered on. So in case we change our freeze logic and keep
+ 	 * the device powered we can also remove the following set power state
+ 	 * call.
+ 	 */
+ 	ret = pci_set_power_state(dev->pdev, PCI_D0);
+ 	if (ret) {
+ 		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+ 		goto out;
+ 	}
+ 
+ 	/*
+ 	 * Note that pci_enable_device() first enables any parent bridge
+ 	 * device and only then sets the power state for this device. The
+ 	 * bridge enabling is a nop though, since bridge devices are resumed
+ 	 * first. The order of enabling power and enabling the device is
+ 	 * imposed by the PCI core as described above, so here we preserve the
+ 	 * same order for the freeze/thaw phases.
+ 	 *
+ 	 * TODO: eventually we should remove pci_disable_device() /
+ 	 * pci_enable_enable_device() from suspend/resume. Due to how they
+ 	 * depend on the device enable refcount we can't anyway depend on them
+ 	 * disabling/enabling the device.

Line 805... Line 812...
- 	 * FIXME: This should be solved with a special hdmi sink device or
- 	 * similar so that power domains can be employed.
- 	 */
- 	if (pci_enable_device(dev->pdev))
- 		return -EIO;
- 
+ 	 */
+ 	if (pci_enable_device(dev->pdev)) {
+ 		ret = -EIO;
+ 		goto out;
+ 	}
+ 
+ 	pci_set_master(dev->pdev);
+ 

Line 811... Line 820...
- 	pci_set_master(dev->pdev);
+ 	disable_rpm_wakeref_asserts(dev_priv);

Line 812... Line 821...
  
- 	if (IS_VALLEYVIEW(dev_priv))
- 		ret = vlv_resume_prepare(dev_priv, false);
- 	if (ret)
- 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
- 			  ret);
+ 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ 		ret = vlv_resume_prepare(dev_priv, false);
+ 	if (ret)

Line 818... Line 825...
- 
+ 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
+ 			  ret);
+ 
  	intel_uncore_early_sanitize(dev, true);
+ 
+ 	if (IS_BROXTON(dev))
+ 		ret = bxt_resume_prepare(dev_priv);
+ 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ 		hsw_disable_pc8(dev_priv);

Line 820... Line 834...
  
- 	if (IS_BROXTON(dev))
+ 	intel_uncore_sanitize(dev);

Line 822... Line 836...
- 		ret = bxt_resume_prepare(dev_priv);
+ 

Line 894... Line 908...
  		DRM_ERROR("Failed to reset chip: %i\n", ret);
  		mutex_unlock(&dev->struct_mutex);
  		return ret;
  	}

Line -... Line 912...
+ 
+ 	intel_overlay_reset(dev_priv);
  

Line 899... Line 915...
  	/* Ok, now get things going again... */
  
  	/*

Line 1029... Line 1045...
  		return 0;

Line 1030... Line 1046...
  
  	return i915_drm_resume(drm_dev);

Line 1032... Line -...
- }
- 
- static int skl_suspend_complete(struct drm_i915_private *dev_priv)
- {
- 	/* Enabling DC6 is not a hard requirement to enter runtime D3 */
- 
- 	skl_uninit_cdclk(dev_priv);
- 
- 	return 0;
  }
  
  static int hsw_suspend_complete(struct drm_i915_private *dev_priv)

Line 1044... Line 1051...
  {

Line 1077... Line 1084...
  	intel_prepare_ddi(dev);

Line 1078... Line 1085...
  
  	return 0;

Line 1080... Line -...
- }
- 
- static int skl_resume_prepare(struct drm_i915_private *dev_priv)
- {
- 	struct drm_device *dev = dev_priv->dev;
- 
- 	skl_init_cdclk(dev_priv);
- 	intel_csr_load_program(dev);
- 
- 	return 0;
  }
  
  /*
   * Save all Gunit registers that may be lost after a D3 and a subsequent
   * S0i[R123] transition. The list of registers needing a save/restore is

Line 1476... Line 1473...
  		 */
  		pm_runtime_mark_last_busy(device);

Line 1478... Line 1475...
  
  		return -EAGAIN;
- 	}
+ 	}
+ 
+ 	disable_rpm_wakeref_asserts(dev_priv);
+ 
  	/*
  	 * We are safe here against re-faults, since the fault handler takes
  	 * an RPM reference.
  	 */
  	i915_gem_release_all_mmaps(dev_priv);

Line -... Line 1486...
- 	mutex_unlock(&dev->struct_mutex);
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+ 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

Line 1487... Line 1489...
  
  	intel_guc_suspend(dev);

Line 1489... Line 1491...
  
  	intel_suspend_gt_powersave(dev);
  	intel_runtime_pm_disable_interrupts(dev_priv);
  

Line -... Line 1495...
- 	ret = intel_suspend_complete(dev_priv);
- 	if (ret) {
+ 	ret = intel_suspend_complete(dev_priv);
+ 	if (ret) {
+ 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ 		intel_runtime_pm_enable_interrupts(dev_priv);

Line 1495... Line -...
- 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
- 		intel_runtime_pm_enable_interrupts(dev_priv);
- 
+ 
+ 		enable_rpm_wakeref_asserts(dev_priv);
+ 
+ 		return ret;
+ 	}

Line 1498... Line 1504...
- 		return ret;
- 	}
- 
+ 
+ 	intel_uncore_forcewake_reset(dev, false);
  

Line 1541... Line 1547...
  	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
  		return -ENODEV;

Line 1543... Line 1549...
  

Line -... Line 1550...
- 	DRM_DEBUG_KMS("Resuming device\n");
+ 	DRM_DEBUG_KMS("Resuming device\n");
+ 
+ 	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+ 	disable_rpm_wakeref_asserts(dev_priv);
  

Line 1546... Line 1555...
  	intel_opregion_notify_adapter(dev, PCI_D0);

Line 1547... Line 1556...
  	dev_priv->pm.suspended = false;
  

Line 1549... Line 1558...
  	intel_guc_resume(dev);
  
- 	if (IS_GEN6(dev_priv))
- 		intel_init_pch_refclk(dev);
- 
- 	if (IS_BROXTON(dev))
- 		ret = bxt_resume_prepare(dev_priv);
- 	else if (IS_SKYLAKE(dev))
+ 	if (IS_GEN6(dev_priv))
+ 		intel_init_pch_refclk(dev);
+ 
+ 	if (IS_BROXTON(dev))

Line 1557... Line 1564...
- 		ret = skl_resume_prepare(dev_priv);
+ 		ret = bxt_resume_prepare(dev_priv);
  	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  		hsw_disable_pc8(dev_priv);

Line 1572... Line 1579...
  	/*
  	 * On VLV/CHV display interrupts are part of the display
  	 * power well, so hpd is reinitialized from there. For
  	 * everyone else do it here.
  	 */
- 	if (!IS_VALLEYVIEW(dev_priv))
+ 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
  		intel_hpd_init(dev_priv);

Line 1579... Line 1586...
  

Line -... Line 1587...
- 	intel_enable_gt_powersave(dev);
+ 	intel_enable_gt_powersave(dev);
+ 
+ 	enable_rpm_wakeref_asserts(dev_priv);
  
  	if (ret)
  		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);

Line 1595... Line 1604...
  {
  	int ret;

Line 1597... Line 1606...
  
  	if (IS_BROXTON(dev_priv))
- 		ret = bxt_suspend_complete(dev_priv);
- 	else if (IS_SKYLAKE(dev_priv))
- 		ret = skl_suspend_complete(dev_priv);
+ 		ret = bxt_suspend_complete(dev_priv);
  	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  		ret = hsw_suspend_complete(dev_priv);
- 	else if (IS_VALLEYVIEW(dev_priv))
+ 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  		ret = vlv_suspend_complete(dev_priv);
  	else

Line 1607... Line 1614...
  		ret = 0;

Line 1727... Line 1734...
      drm_core_init();

Line 1728... Line 1735...
  
      DRM_INFO("device %x:%x\n", device.pci_dev.vendor,

Line 1730... Line 1737...
                                  device.pci_dev.device);

Line 1731... Line 1738...
  

Line 1732... Line 1739...
-     driver.driver_features |= DRIVER_MODESET;
+     driver.driver_features |= DRIVER_MODESET+DRIVER_ATOMIC;
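The last hunk switches the KolibriOS entry point to advertise atomic modesetting alongside KMS. driver_features is a bitmask and DRIVER_MODESET/DRIVER_ATOMIC are distinct single-bit flags, so the `+` above happens to produce the same value as a bitwise OR, but `|` is the conventional operator for combining flags since it stays correct even when a bit is already set. A small sketch with illustrative flag values (the real constants live in the DRM headers):

    #include <stdio.h>

    #define DRIVER_MODESET  0x2000   /* illustrative values, not the DRM headers' */
    #define DRIVER_ATOMIC   0x10000

    int main(void)
    {
            unsigned int features = 0;

            /* Bitwise OR is idempotent, so setting a flag twice is harmless;
             * '+' would double-count a bit that is already set. */
            features |= DRIVER_MODESET | DRIVER_ATOMIC;
            features |= DRIVER_MODESET;

            printf("modeset: %d atomic: %d\n",
                   !!(features & DRIVER_MODESET), !!(features & DRIVER_ATOMIC));
            return 0;
    }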
 
1740