Subversion Repositories Kolibri OS

/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

//#include 
#include 
#include 
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include 
#include 
#include 
#include 

#include 

#include 

#define __read_mostly
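/* Note: on upstream Linux, __read_mostly is a section placement attribute;
 * defining it empty turns it into a no-op, presumably because the KolibriOS
 * build has no equivalent linker section. The module_param_named()/
 * MODULE_PARM_DESC() calls below are likewise assumed to be compatibility
 * stubs in this port; the initializers are what actually take effect. */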

int init_display_kms(struct drm_device *dev);

static int i915_modeset __read_mostly = 1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
		"1=on, -1=force vga console preference [default])");

unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);

int i915_panel_ignore_lid __read_mostly         =  1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
		"-1=force lid closed, -2=force lid open)");

unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
		"Enable powersavings, fbc, downclocking, etc. (default: true)");

int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
		"Enable power-saving render C-state 6. "
		"Different stages can be selected via bitmask values "
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
		"default: -1 (use per-chip default)");

int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
		"Enable frame buffer compression for power savings "
		"(default: -1 (use per-chip default))");

unsigned int i915_lvds_downclock  __read_mostly =  0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
		"Use panel (LVDS/eDP) downclocking for power savings "
		"(default: false)");

int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
		 "Specify LVDS channel mode "
		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");

int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
		"(default: auto from VBT)");

int i915_vbt_sdvo_panel_type __read_mostly      = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
		"Override/Ignore selection of SDVO panel mode in the VBT "
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");

static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");

bool i915_enable_hangcheck __read_mostly = false;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
		"Periodically check GPU activity for detecting hangs. "
		"WARNING: Disabling this can cause system wide hangs. "
		"(default: true)");

int i915_enable_ppgtt __read_mostly = -1;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
MODULE_PARM_DESC(i915_enable_ppgtt,
		"Enable PPGTT (default: true)");

int i915_enable_psr __read_mostly = 0;
module_param_named(enable_psr, i915_enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");

unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
		"Enable preliminary hardware support.");

int i915_disable_power_well __read_mostly = 1;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
		 "Disable the power well when possible (default: true)");

int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");

bool i915_fastboot __read_mostly = 0;
module_param_named(fastboot, i915_fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
		 "(default: false)");

int i915_enable_pc8 __read_mostly = 0;
module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");

int i915_pc8_timeout __read_mostly = 5000;
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");

bool i915_prefault_disable __read_mostly;
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");

static struct drm_driver driver;
extern int intel_agp_enabled;

#define PCI_VENDOR_ID_INTEL        0x8086

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
};

#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};

static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
};

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info), 	\
	INTEL_G45_IDS(&intel_g45_info), 	\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_D_IDS(&intel_broadwell_d_info)

static const struct pci_device_id pciidlist[] = {       /* aka */
	INTEL_PCI_IDS,
    {0, 0, 0}
};
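/* Each INTEL_*_IDS() macro above comes from the shared drm/i915_pciids.h
 * header and expands to PCI match entries (vendor 0x8086 plus the per-chip
 * device IDs) whose driver_data field points at the corresponding
 * intel_device_info structure; i915_init()/i915_pci_probe() cast
 * driver_data back to that info struct once a matching device is found. */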

#define INTEL_PCH_DEVICE_ID_MASK        0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00

void intel_detect_pch(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct pci_dev *pch;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

    /*
     * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
     * make graphics device passthrough work easily for the VMM, which only
     * needs to expose the ISA bridge to let the driver know the real
     * hardware underneath. This is a requirement from the virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
     */
    pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	while (pch) {
		struct pci_dev *curr = pch;
        if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id;
            id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

            if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                dev_priv->pch_type = PCH_IBX;
                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
            } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                dev_priv->pch_type = PCH_CPT;
                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
            } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                /* PantherPoint is CPT compatible */
                dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else {
				goto check_next;
            }
			break;
        }
check_next:
		pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
//       pci_dev_put(curr);
    }
	if (!pch)
		DRM_DEBUG_KMS("No PCH found?\n");
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev)) {
		WARN_ON(!i915_preliminary_hw_support);
		return false;
	}

	if (i915_semaphores >= 0)
		return i915_semaphores;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

#if 0
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	intel_runtime_pm_get(dev_priv);

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	hsw_disable_package_c8(dev_priv);
	intel_display_set_init_power(dev, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		mutex_lock(&dev->mode_config.mutex);
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);
		mutex_unlock(&dev->mode_config.mutex);

		intel_modeset_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	intel_opregion_fini(dev);

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();

	return 0;
}

int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;


	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
}

static void intel_resume_hotplug(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	intel_uncore_early_sanitize(dev);

	intel_uncore_sanitize(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	intel_power_domains_init_hw(dev);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		drm_mode_config_reset(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
	 * expected level. */
	hsw_enable_package_c8(dev_priv);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_runtime_pm_put(dev_priv);
	return error;
}

static int i915_drm_thaw(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}

int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	if (!i915_try_reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		ret = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		drm_irq_uninstall(dev);
		drm_irq_install(dev);
		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
	 * implementation for gen3 (and only gen3) that used legacy drm maps
	 * (gasp!) to share buffers between X and the client. Hence we need to
	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
	if (intel_info->gen != 3) {
		driver.driver_features &=
			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
	} else if (!intel_agp_enabled) {
		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
		return -ENODEV;
	}

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int error;

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(drm_dev);
	if (error)
		return error;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}


#endif

static struct drm_driver driver = {
    /* Don't use MTRRs here; the Xserver or userspace app should
     * deal with them for Intel hardware.
     */
    .driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
    .load = i915_driver_load,
//    .unload = i915_driver_unload,
    .open = i915_driver_open,
//    .lastclose = i915_driver_lastclose,
//    .preclose = i915_driver_preclose,
//    .postclose = i915_driver_postclose,

    /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
//    .suspend = i915_suspend,
//    .resume = i915_resume,

//    .device_is_agp = i915_driver_device_is_agp,
//    .master_create = i915_master_create,
//    .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
    .gem_free_object = i915_gem_free_object,

//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
//    .gem_prime_export = i915_gem_prime_export,
//    .gem_prime_import = i915_gem_prime_import,

//    .dumb_create = i915_gem_dumb_create,
//    .dumb_map_offset = i915_gem_mmap_gtt,
//    .dumb_destroy = i915_gem_dumb_destroy,
//    .ioctls = i915_ioctls,
//    .fops = &i915_driver_fops,
//    .name = DRIVER_NAME,
//    .desc = DRIVER_DESC,
//    .date = DRIVER_DATE,
//    .major = DRIVER_MAJOR,
//    .minor = DRIVER_MINOR,
//    .patchlevel = DRIVER_PATCHLEVEL,
};


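/* KolibriOS-specific entry point (in place of the Linux module_init/
 * pci_driver registration): scan the PCI bus for the first device matching
 * pciidlist, bring up the DRM core, force kernel modesetting, and register
 * the driver, which in turn runs the .load callback (i915_driver_load) for
 * the detected device. */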
int i915_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id  *ent;
    int  err;

    ent = find_pci_device(&device, pciidlist);
    if( unlikely(ent == NULL) )
    {
        dbgprintf("device not found\n");
        return -ENODEV;
    };

    drm_core_init();

    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    driver.driver_features |= DRIVER_MODESET;

    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);

    return err;
}