Subversion Repositories Kolibri OS

Rev

Rev 5060 | Rev 5367 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2325 Serge 1
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2
 */
3
/*
4
 *
5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6
 * All Rights Reserved.
7
 *
8
 * Permission is hereby granted, free of charge, to any person obtaining a
9
 * copy of this software and associated documentation files (the
10
 * "Software"), to deal in the Software without restriction, including
11
 * without limitation the rights to use, copy, modify, merge, publish,
12
 * distribute, sub license, and/or sell copies of the Software, and to
13
 * permit persons to whom the Software is furnished to do so, subject to
14
 * the following conditions:
15
 *
16
 * The above copyright notice and this permission notice (including the
17
 * next paragraph) shall be included in all copies or substantial portions
18
 * of the Software.
19
 *
20
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
 *
28
 */
29
 
2330 Serge 30
//#include 
3031 serge 31
#include 
32
#include 
2330 Serge 33
#include "i915_drv.h"
4126 Serge 34
#include "i915_trace.h"
2330 Serge 35
#include "intel_drv.h"
2325 Serge 36
 
37
#include 
38
#include 
39
#include 
5060 serge 40
#include 
2325 Serge 41
 
3031 serge 42
#include 
43
 
2325 Serge 44
#include 
45
 
5354 serge 46
#
5060 serge 47
static struct drm_driver driver;
2330 Serge 48
 
5060 serge 49
/* MMIO offset tables for pipes/transcoders/palettes shared by all
 * pre-Cherryview display engines; spliced into the device-info structs. */
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

/* Cherryview has its own register blocks for pipe/transcoder/palette C. */
#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

/* Legacy cursor register layout (cursor C offset only used on CHV). */
#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

/* Ivybridge and newer use dedicated cursor B/C register blocks. */
#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

/* KolibriOS-specific KMS bring-up entry point (defined elsewhere). */
int init_display_kms(struct drm_device *dev);


extern int intel_agp_enabled;

#define PCI_VENDOR_ID_INTEL        0x8086
76
 
2325 Serge 77
 
2339 Serge 78
/* Per-chipset feature descriptors for gen3 (i915/i945 class) GPUs.
 * Matched against PCI ids via pciidlist below; flag meanings are
 * defined by struct intel_device_info in i915_drv.h. */
static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
112
 
113
/* Gen4 (i965/G4x) and remaining gen3 (G33/Pineview) descriptors. */
static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

/* Pineview has no ring_mask entry: render ring setup is handled as gen3. */
static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
166
 
167
/* Gen5 (Ironlake) and gen6 (Sandybridge) descriptors. */
static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
    .has_fbc      = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
203
 
3746 Serge 204
/* Baseline feature set shared by all gen7 parts; individual descriptors
 * below override specific fields (later initializers win). */
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1
3746 Serge 210
 
2339 Serge 211
/* Gen7 descriptors: Ivybridge, Valleyview and Haswell variants,
 * all built on GEN7_FEATURES with per-variant overrides. */
static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};
277
 
4560 Serge 278
/* Gen8 (Broadwell/Cherryview) and gen9 (Skylake) descriptors.
 * GT3 Broadwell parts add the second video (BSD2) ring. */
static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

/* Cherryview reuses the Valleyview display path (is_valleyview = 1)
 * but with the CHV-specific pipe C register offsets. */
static const struct intel_device_info intel_cherryview_info = {
	.is_preliminary = 1,
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
	.is_preliminary = 1,
	.is_skylake = 1,
	.gen = 9, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};
349
 
4104 Serge 350
/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info), 	\
	INTEL_G45_IDS(&intel_g45_info), 	\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
	INTEL_CHV_IDS(&intel_cherryview_info),	\
	INTEL_SKL_IDS(&intel_skylake_info)

/* PCI probe table; zero entry terminates the list. */
static const struct pci_device_id pciidlist[] = {       /* aka */
	INTEL_PCI_IDS,
    {0, 0, 0}
};
389
 
2326 Serge 390
/* PCH (south bridge) detection: the high byte of the ISA bridge's PCI
 * device id identifies the PCH generation. */
#define INTEL_PCH_DEVICE_ID_MASK        0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
2325 Serge 395
 
2342 Serge 396
/*
 * Identify the PCH (platform controller hub / south bridge) paired with
 * this GPU by scanning the ISA bridge devices, and record its type in
 * dev_priv->pch_type / pch_id for later display-clock and hotplug setup.
 */
void intel_detect_pch(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

    /*
     * The reason to probe ISA bridge instead of Dev31:Fun0 is to
     * make graphics device passthrough work easy for VMM, that only
     * need to expose ISA bridge to let driver know the real hardware
     * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
     */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
        if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

            if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                dev_priv->pch_type = PCH_IBX;
                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
            } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                dev_priv->pch_type = PCH_CPT;
                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
            } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                /* PantherPoint is CPT compatible */
                dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_HSW_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				/* BDW always pairs with LPT-LP regardless of the
				 * id read back; force the LP id. */
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev));
			} else
				continue;

			break;
        }
    }
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	/* NOTE(review): upstream drops the device reference here; disabled
	 * in this port — confirm pci_get_class refcounting on KolibriOS. */
//	pci_dev_put(pch);
}
473
 
3031 serge 474
/*
 * Decide whether inter-ring hardware semaphores should be used on this
 * device, honouring the i915.semaphores module parameter override.
 */
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	/* No semaphore hardware before gen6. */
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	/* Explicit module-parameter override (negative means "auto"). */
	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
498
 
4104 Serge 499
#if 0
5060 serge 500
/* Give every encoder that implements a ->suspend hook a chance to
 * quiesce (e.g. cancel deferred work) before the hardware powers down. */
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_encoder *encoder;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (intel_encoder->suspend)
			intel_encoder->suspend(intel_encoder);
	}
	drm_modeset_unlock_all(dev);
}
514
 
5354 serge 515
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);

/*
 * First-phase suspend: idle the GPU, shut down the display pipes and
 * interrupts, and save register state, leaving the device ready for
 * i915_drm_suspend_late() to power it off.  Returns 0 or a negative
 * error code (in which case resume may subsequently fail).
 */
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	pci_power_t opregion_target_state;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		intel_suspend_gt_powersave(dev);

		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw. Also, power gate the CRTC power wells.
		 */
		drm_modeset_lock_all(dev);
		for_each_crtc(dev, crtc)
			intel_crtc_control(crtc, false);
		drm_modeset_unlock_all(dev);

		intel_dp_mst_suspend(dev);

		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_hpd_cancel_work(dev_priv);

		intel_suspend_encoders(dev_priv);

		intel_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	/* Tell the BIOS (via opregion) what power state we are headed for;
	 * S1/S2 targets only need D1. */
	opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		opregion_target_state = PCI_D1;
#endif
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	return 0;
}
592
 
5354 serge 593
/*
 * Second-phase suspend: run platform-specific completion steps and then
 * actually power the PCI device down to D3hot.
 */
static int i915_drm_suspend_late(struct drm_device *drm_dev)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int ret;

	ret = intel_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);

		return ret;
	}

	pci_disable_device(drm_dev->pdev);
	pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	return 0;
}
611
 
612
/*
 * Legacy (non-PM-ops) suspend entry point: validates the request and
 * runs both suspend phases back to back.
 */
int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	/* Only plain suspend/freeze events are supported here. */
	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	/* Device already handed off (e.g. vga-switcheroo): nothing to do. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev);
}
2325 Serge 635
 
5354 serge 636
/*
 * Second-phase resume: restore GTT mappings and saved register state,
 * re-initialize the GPU and the display hardware, and re-enable
 * interrupts/hotplug.  Counterpart to i915_drm_suspend().
 */
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_modeset_init_hw(dev);

		spin_lock_irq(&dev_priv->irq_lock);
			if (dev_priv->display.hpd_irq_setup)
				dev_priv->display.hpd_irq_setup(dev);
		spin_unlock_irq(&dev_priv->irq_lock);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		intel_dp_mst_resume(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev_priv);
		/* Config may have changed between suspend and resume */
		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	return 0;
}
702
 
5354 serge 703
/*
 * First-phase resume: re-enable the PCI device and bring the uncore and
 * power domains back up before i915_drm_resume() restores the rest.
 */
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		/* Best-effort: log and keep resuming even on failure. */
		DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return ret;
}
737
 
5354 serge 738
/*
 * Legacy (non-PM-ops) resume entry point: runs both resume phases
 * back to back, skipping switched-off devices.
 */
int i915_resume_legacy(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}
751
 
752
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	/* Resets disabled via the i915.reset module parameter. */
	if (!i915.reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	/* Non-zero stop_rings means this hang was injected for testing. */
	simulated = dev_priv->gpu_error.stop_rings != 0;

		ret = intel_gpu_reset(dev);

		/* Also reset the gpu hangman. */
		if (simulated) {
			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
			dev_priv->gpu_error.stop_rings = 0;
			if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
					  "error for simulated gpu hangs\n");
				ret = 0;
			}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
		dev_priv->gpu_error.reload_in_reset = true;

		ret = i915_gem_init_hw(dev);

		dev_priv->gpu_error.reload_in_reset = false;

		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		/*
		 * FIXME: This races pretty badly against concurrent holders of
		 * ring interrupts. This is possible since we've started to drop
		 * dev->struct_mutex in select places when waiting for the gpu.
		 */

		/*
		 * rps/rc6 re-init is necessary to restore state lost after the
		 * reset and the re-install of gt irqs. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset.
		 */
		if (INTEL_INFO(dev)->gen > 5)
			intel_reset_gt_powersave(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}
852
 
853
/*
 * PCI probe callback: reject preliminary hardware (unless the module
 * parameter allows it) and non-zero PCI functions, then hand the device
 * to the DRM core.
 */
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}
876
 
877
/* PCI removal callback: tear down the DRM device bound to @pdev. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
884
 
885
static int i915_pm_suspend(struct device *dev)
886
{
887
	struct pci_dev *pdev = to_pci_dev(dev);
888
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
889
 
890
	if (!drm_dev || !drm_dev->dev_private) {
891
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
892
		return -ENODEV;
893
	}
894
 
895
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
896
		return 0;
897
 
5354 serge 898
	return i915_drm_suspend(drm_dev);
5060 serge 899
}
4104 Serge 900
 
5060 serge 901
/* Late system suspend handler; runs after all regular suspend callbacks. */
static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev);
}
920
 
5060 serge 921
static int i915_pm_resume_early(struct device *dev)
922
{
923
	struct pci_dev *pdev = to_pci_dev(dev);
924
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
925
 
5354 serge 926
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
927
		return 0;
928
 
929
	return i915_drm_resume_early(drm_dev);
5060 serge 930
}
931
 
4104 Serge 932
static int i915_pm_resume(struct device *dev)
933
{
934
	struct pci_dev *pdev = to_pci_dev(dev);
935
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
936
 
5354 serge 937
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
938
		return 0;
4104 Serge 939
 
5354 serge 940
	return i915_drm_resume(drm_dev);
4104 Serge 941
}
942
 
5354 serge 943
/*
 * Suspend-complete hook for Haswell/Broadwell: enter the PC8 power state
 * (see hsw_enable_pc8()). Always succeeds.
 */
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}
949
 
950
/*
951
 * Save all Gunit registers that may be lost after a D3 and a subsequent
952
 * S0i[R123] transition. The list of registers needing a save/restore is
953
 * defined in the VLV2_S0IXRegs document. This documents marks all Gunit
954
 * registers in the following way:
955
 * - Driver: saved/restored by the driver
956
 * - Punit : saved/restored by the Punit firmware
957
 * - No, w/o marking: no need to save/restore, since the register is R/O or
958
 *                    used internally by the HW in a way that doesn't depend
959
 *                    keeping the content across a suspend/resume.
960
 * - Debug : used for debugging
961
 *
962
 * We save/restore all registers marked with 'Driver', with the following
963
 * exceptions:
964
 * - Registers out of use, including also registers marked with 'Debug'.
965
 *   These have no effect on the driver's operation, so we don't save/restore
966
 *   them to reduce the overhead.
967
 * - Registers that are fully setup by an initialization function called from
968
 *   the resume path. For example many clock gating and RPS/RC6 registers.
969
 * - Registers that provide the right functionality with their reset defaults.
970
 *
971
 * TODO: Except for registers that based on the above 3 criteria can be safely
972
 * ignored, we save/restore all others, practically treating the HW context as
973
 * a black-box for the driver. Further investigation is needed to reduce the
974
 * saved/restored registers even further, by following the same 3 criteria.
975
 */
976
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
977
{
978
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
979
	int i;
980
 
981
	/* GAM 0x4000-0x4770 */
982
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
983
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
984
	s->arb_mode		= I915_READ(ARB_MODE);
985
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
986
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
987
 
988
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
989
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
990
 
991
	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
992
	s->gfx_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
993
 
994
	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
995
	s->ecochk		= I915_READ(GAM_ECOCHK);
996
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
997
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);
998
 
999
	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);
1000
 
1001
	/* MBC 0x9024-0x91D0, 0x8500 */
1002
	s->g3dctl		= I915_READ(VLV_G3DCTL);
1003
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
1004
	s->mbctl		= I915_READ(GEN6_MBCTL);
1005
 
1006
	/* GCP 0x9400-0x9424, 0x8100-0x810C */
1007
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
1008
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
1009
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
1010
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
1011
	s->rstctl		= I915_READ(GEN6_RSTCTL);
1012
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);
1013
 
1014
	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1015
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
1016
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
1017
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
1018
	s->ecobus		= I915_READ(ECOBUS);
1019
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
1020
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
1021
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
1022
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
1023
	s->rcedata		= I915_READ(VLV_RCEDATA);
1024
	s->spare2gh		= I915_READ(VLV_SPAREG2H);
1025
 
1026
	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1027
	s->gt_imr		= I915_READ(GTIMR);
1028
	s->gt_ier		= I915_READ(GTIER);
1029
	s->pm_imr		= I915_READ(GEN6_PMIMR);
1030
	s->pm_ier		= I915_READ(GEN6_PMIER);
1031
 
1032
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1033
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
1034
 
1035
	/* GT SA CZ domain, 0x100000-0x138124 */
1036
	s->tilectl		= I915_READ(TILECTL);
1037
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
1038
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
1039
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1040
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
1041
 
1042
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1043
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
1044
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
1045
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
1046
 
1047
	/*
1048
	 * Not saving any of:
1049
	 * DFT,		0x9800-0x9EC0
1050
	 * SARB,	0xB000-0xB1FC
1051
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
1052
	 * PCI CFG
1053
	 */
1054
}
1055
 
1056
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1057
{
1058
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1059
	u32 val;
1060
	int i;
1061
 
1062
	/* GAM 0x4000-0x4770 */
1063
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
1064
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
1065
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
1066
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
1067
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
1068
 
1069
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1070
		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
1071
 
1072
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1073
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
1074
 
1075
	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
1076
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
1077
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
1078
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);
1079
 
1080
	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);
1081
 
1082
	/* MBC 0x9024-0x91D0, 0x8500 */
1083
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
1084
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
1085
	I915_WRITE(GEN6_MBCTL,		s->mbctl);
1086
 
1087
	/* GCP 0x9400-0x9424, 0x8100-0x810C */
1088
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
1089
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
1090
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
1091
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
1092
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
1093
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);
1094
 
1095
	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1096
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
1097
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
1098
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
1099
	I915_WRITE(ECOBUS,		s->ecobus);
1100
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
1101
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
1102
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
1103
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
1104
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
1105
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);
1106
 
1107
	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1108
	I915_WRITE(GTIMR,		s->gt_imr);
1109
	I915_WRITE(GTIER,		s->gt_ier);
1110
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
1111
	I915_WRITE(GEN6_PMIER,		s->pm_ier);
1112
 
1113
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1114
		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
1115
 
1116
	/* GT SA CZ domain, 0x100000-0x138124 */
1117
	I915_WRITE(TILECTL,			s->tilectl);
1118
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
1119
	/*
1120
	 * Preserve the GT allow wake and GFX force clock bit, they are not
1121
	 * be restored, as they are used to control the s0ix suspend/resume
1122
	 * sequence by the caller.
1123
	 */
1124
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
1125
	val &= VLV_GTLC_ALLOWWAKEREQ;
1126
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1127
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1128
 
1129
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1130
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
1131
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1132
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1133
 
1134
	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
1135
 
1136
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1137
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
1138
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
1139
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
1140
}
4104 Serge 1141
#endif
1142
 
5060 serge 1143
/*
 * Force the GFX clock on or off via VLV_GTLC_SURVIVABILITY_REG, waiting up
 * to 20 ms for the clock status bit to reflect the new state.
 * Returns 0 on success or the wait_for() error on timeout.
 */
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	/* Warn if the force bit is already in the requested state. */
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
	/* Wait for a previous force-off to settle */
	if (force_on) {
		err = wait_for(!COND, 20);
		if (err) {
			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
			return err;
		}
	}

	/* Read-modify-write the force-on bit only. */
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	/* Force-off is not acknowledged; only force-on is waited for. */
	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}
1179
#if 0
1180
/*
 * Enable or disable GT wake requests via VLV_GTLC_ALLOWWAKEREQ and wait
 * (up to 1 ms) for the matching ALLOWWAKEACK status in VLV_GTLC_PW_STATUS.
 * Returns 0 on success or the wait_for() error on timeout.
 */
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		/*
		 * Fixed: the message unconditionally said "disabling" even
		 * when enabling GT waking.
		 */
		DRM_ERROR("timeout %sabling GT waking\n", allow ? "en" : "dis");
	return err;
#undef COND
}
1200
 
1201
/*
 * Wait for the render and media GT power wells to reach the requested
 * on/off state, as reported by VLV_GTLC_PW_STATUS. Returns 0 when the
 * wells are already (or become) in the requested state, otherwise the
 * wait_for() timeout error.
 */
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	/* Fast path: already in the requested state. */
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
			wait_for_on ? "on" : "off",
			I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");

	return err;
#undef COND
}
1230
 
1231
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1232
{
1233
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1234
		return;
1235
 
1236
	DRM_ERROR("GT register access while GT waking disabled\n");
1237
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1238
}
1239
 
5354 serge 1240
/*
 * Valleyview suspend-complete sequence: force the GFX clock on, disable GT
 * wake requests, snapshot the Gunit state, then release the clock force.
 * On any failure the wake/clock controls are rolled back via the error
 * labels. Returns 0 on success or the first error encountered.
 */
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;
	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
1279
 
5354 serge 1280
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1281
				bool rpm_resume)
5060 serge 1282
{
1283
	struct drm_device *dev = dev_priv->dev;
1284
	int err;
1285
	int ret;
1286
 
1287
	/*
1288
	 * If any of the steps fail just try to continue, that's the best we
1289
	 * can do at this point. Return the first error code (which will also
1290
	 * leave RPM permanently disabled).
1291
	 */
1292
	ret = vlv_force_gfx_clock(dev_priv, true);
1293
 
1294
	vlv_restore_gunit_s0ix_state(dev_priv);
1295
 
1296
	err = vlv_allow_gt_wake(dev_priv, true);
1297
	if (!ret)
1298
		ret = err;
1299
 
1300
	err = vlv_force_gfx_clock(dev_priv, false);
1301
	if (!ret)
1302
		ret = err;
1303
 
1304
	vlv_check_no_gt_access(dev_priv);
1305
 
5354 serge 1306
	if (rpm_resume) {
5060 serge 1307
	intel_init_clock_gating(dev);
1308
	i915_gem_restore_fences(dev);
5354 serge 1309
	}
5060 serge 1310
 
1311
	return ret;
1312
}
1313
 
1314
static int intel_runtime_suspend(struct device *device)
1315
{
1316
	struct pci_dev *pdev = to_pci_dev(device);
1317
	struct drm_device *dev = pci_get_drvdata(pdev);
1318
	struct drm_i915_private *dev_priv = dev->dev_private;
1319
	int ret;
1320
 
1321
	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1322
		return -ENODEV;
1323
 
5354 serge 1324
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1325
		return -ENODEV;
1326
 
5060 serge 1327
	assert_force_wake_inactive(dev_priv);
1328
 
1329
	DRM_DEBUG_KMS("Suspending device\n");
1330
 
1331
	/*
1332
	 * We could deadlock here in case another thread holding struct_mutex
1333
	 * calls RPM suspend concurrently, since the RPM suspend will wait
1334
	 * first for this RPM suspend to finish. In this case the concurrent
1335
	 * RPM resume will be followed by its RPM suspend counterpart. Still
1336
	 * for consistency return -EAGAIN, which will reschedule this suspend.
1337
	 */
1338
	if (!mutex_trylock(&dev->struct_mutex)) {
1339
		DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
1340
		/*
1341
		 * Bump the expiration timestamp, otherwise the suspend won't
1342
		 * be rescheduled.
1343
		 */
1344
		pm_runtime_mark_last_busy(device);
1345
 
1346
		return -EAGAIN;
1347
	}
1348
	/*
1349
	 * We are safe here against re-faults, since the fault handler takes
1350
	 * an RPM reference.
1351
	 */
1352
	i915_gem_release_all_mmaps(dev_priv);
1353
	mutex_unlock(&dev->struct_mutex);
1354
 
5354 serge 1355
	intel_suspend_gt_powersave(dev);
1356
	intel_runtime_pm_disable_interrupts(dev_priv);
5060 serge 1357
 
5354 serge 1358
	ret = intel_suspend_complete(dev_priv);
5060 serge 1359
	if (ret) {
1360
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
5354 serge 1361
		intel_runtime_pm_enable_interrupts(dev_priv);
5060 serge 1362
 
1363
		return ret;
1364
	}
1365
 
1366
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
1367
	dev_priv->pm.suspended = true;
1368
 
1369
	/*
5354 serge 1370
	 * FIXME: We really should find a document that references the arguments
1371
	 * used below!
1372
	 */
1373
	if (IS_HASWELL(dev)) {
1374
		/*
5060 serge 1375
	 * current versions of firmware which depend on this opregion
1376
	 * notification have repurposed the D1 definition to mean
1377
	 * "runtime suspended" vs. what you would normally expect (D3)
5354 serge 1378
		 * to distinguish it from notifications that might be sent via
1379
		 * the suspend path.
5060 serge 1380
	 */
1381
	intel_opregion_notify_adapter(dev, PCI_D1);
5354 serge 1382
	} else {
1383
		/*
1384
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1385
		 * being detected, and the call we do at intel_runtime_resume()
1386
		 * won't be able to restore them. Since PCI_D3hot matches the
1387
		 * actual specification and appears to be working, use it. Let's
1388
		 * assume the other non-Haswell platforms will stay the same as
1389
		 * Broadwell.
1390
		 */
1391
		intel_opregion_notify_adapter(dev, PCI_D3hot);
1392
	}
5060 serge 1393
 
1394
	DRM_DEBUG_KMS("Device suspended\n");
1395
	return 0;
1396
}
1397
 
1398
/*
 * Runtime resume handler: notify the opregion, run the platform-specific
 * un-suspend step, then re-init swizzling/ring frequency and re-enable
 * interrupts and GT powersave. Returns the platform step's error (only
 * vlv_resume_prepare() can fail here); the remaining steps always run.
 */
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	/* Platform-specific wake-up, mirroring intel_suspend_complete(). */
	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);
	intel_enable_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
1437
 
5354 serge 1438
/*
1439
 * This function implements common functionality of runtime and system
1440
 * suspend sequence.
1441
 */
1442
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1443
{
1444
	struct drm_device *dev = dev_priv->dev;
1445
	int ret;
1446
 
1447
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1448
		ret = hsw_suspend_complete(dev_priv);
1449
	else if (IS_VALLEYVIEW(dev))
1450
		ret = vlv_suspend_complete(dev_priv);
1451
	else
1452
		ret = 0;
1453
 
1454
	return ret;
1455
}
1456
 
5060 serge 1457
/* Power-management callbacks registered with the PCI/PM core. */
static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_suspend_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
1495
 
1496
/* VMA callbacks for GEM object mmaps: i915 fault handler plus the standard
 * DRM GEM open/close helpers. */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
1501
 
1502
/* Character-device file operations, mostly delegated to the DRM core. */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
1515
#endif
1516
 
3260 Serge 1517
/*
 * DRM driver descriptor for this (KolibriOS) port. Hooks commented out
 * with "//" are intentionally not wired up in the port.
 */
static struct drm_driver driver = {
    /* Don't use MTRRs here; the Xserver or userspace app should
     * deal with them for Intel hardware.
     */
    .driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
    .load = i915_driver_load,
//    .unload = i915_driver_unload,
      .open = i915_driver_open,
//    .lastclose = i915_driver_lastclose,
//    .preclose = i915_driver_preclose,
//    .postclose = i915_driver_postclose,

    /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
//    .suspend = i915_suspend,
//    .resume = i915_resume,

//    .device_is_agp = i915_driver_device_is_agp,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
    .gem_free_object = i915_gem_free_object,

//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
//    .gem_prime_export = i915_gem_prime_export,
//    .gem_prime_import = i915_gem_prime_import,

//    .dumb_create = i915_gem_dumb_create,
//    .dumb_map_offset = i915_gem_mmap_gtt,
//    .dumb_destroy = i915_gem_dumb_destroy,
//    .ioctls = i915_ioctls,
//    .fops = &i915_driver_fops,
//    .name = DRIVER_NAME,
//    .desc = DRIVER_DESC,
//    .date = DRIVER_DATE,
//    .major = DRIVER_MAJOR,
//    .minor = DRIVER_MINOR,
//    .patchlevel = DRIVER_PATCHLEVEL,
};
1560
 
1561
 
3243 Serge 1562
 
3255 Serge 1563
 
4104 Serge 1564
/*
 * KolibriOS driver entry point: locate a supported PCI device, initialize
 * the DRM core and bind the i915 driver to it.
 * Returns 0 on success, -ENODEV when no supported device is present, or
 * the error from drm_get_pci_dev().
 */
int i915_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id  *ent;
    int  err;

    ent = find_pci_device(&device, pciidlist);
    if( unlikely(ent == NULL) )
    {
        dbgprintf("device not found\n");
        return -ENODEV;
    };

    drm_core_init();

    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    /* This port always runs with kernel modesetting enabled. */
    driver.driver_features |= DRIVER_MODESET;

    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);

    return err;
}
2325 Serge 1588