Subversion Repositories Kolibri OS

Rev

Rev 4293 | Rev 4560 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 4293 Rev 4398
1
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
1
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2
 */
2
 */
3
/*
3
/*
4
 *
4
 *
5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6
 * All Rights Reserved.
6
 * All Rights Reserved.
7
 *
7
 *
8
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * Permission is hereby granted, free of charge, to any person obtaining a
9
 * copy of this software and associated documentation files (the
9
 * copy of this software and associated documentation files (the
10
 * "Software"), to deal in the Software without restriction, including
10
 * "Software"), to deal in the Software without restriction, including
11
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * without limitation the rights to use, copy, modify, merge, publish,
12
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * distribute, sub license, and/or sell copies of the Software, and to
13
 * permit persons to whom the Software is furnished to do so, subject to
13
 * permit persons to whom the Software is furnished to do so, subject to
14
 * the following conditions:
14
 * the following conditions:
15
 *
15
 *
16
 * The above copyright notice and this permission notice (including the
16
 * The above copyright notice and this permission notice (including the
17
 * next paragraph) shall be included in all copies or substantial portions
17
 * next paragraph) shall be included in all copies or substantial portions
18
 * of the Software.
18
 * of the Software.
19
 *
19
 *
20
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
 *
27
 *
28
 */
28
 */
29
 
29
 
30
//#include 
30
//#include 
31
#include 
31
#include 
32
#include 
32
#include 
33
#include "i915_drv.h"
33
#include "i915_drv.h"
34
#include "i915_trace.h"
34
#include "i915_trace.h"
35
#include "intel_drv.h"
35
#include "intel_drv.h"
36
 
36
 
37
#include 
37
#include 
38
#include 
38
#include 
39
#include 
39
#include 
40
#include 
40
#include 
41
 
41
 
42
#include 
42
#include 
43
 
43
 
44
#include 
44
#include 
45
 
45
 
46
#define __read_mostly
46
#define __read_mostly
47
 
47
 
48
int init_display_kms(struct drm_device *dev);
48
int init_display_kms(struct drm_device *dev);
49
 
49
 
50
static int i915_modeset __read_mostly = 1;
50
static int i915_modeset __read_mostly = 1;
51
module_param_named(modeset, i915_modeset, int, 0400);
51
module_param_named(modeset, i915_modeset, int, 0400);
52
MODULE_PARM_DESC(modeset,
52
MODULE_PARM_DESC(modeset,
53
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
53
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
54
		"1=on, -1=force vga console preference [default])");
54
		"1=on, -1=force vga console preference [default])");
55
 
55
 
56
unsigned int i915_fbpercrtc __always_unused = 0;
56
unsigned int i915_fbpercrtc __always_unused = 0;
57
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
57
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
58
 
58
 
59
int i915_panel_ignore_lid __read_mostly         =  1;
59
int i915_panel_ignore_lid __read_mostly         =  1;
60
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
60
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
61
MODULE_PARM_DESC(panel_ignore_lid,
61
MODULE_PARM_DESC(panel_ignore_lid,
62
		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
62
		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
63
		"-1=force lid closed, -2=force lid open)");
63
		"-1=force lid closed, -2=force lid open)");
64
 
64
 
65
unsigned int i915_powersave __read_mostly = 0;
65
unsigned int i915_powersave __read_mostly = 0;
66
module_param_named(powersave, i915_powersave, int, 0600);
66
module_param_named(powersave, i915_powersave, int, 0600);
67
MODULE_PARM_DESC(powersave,
67
MODULE_PARM_DESC(powersave,
68
		"Enable powersavings, fbc, downclocking, etc. (default: true)");
68
		"Enable powersavings, fbc, downclocking, etc. (default: true)");
69
 
69
 
70
int i915_semaphores __read_mostly = -1;
70
int i915_semaphores __read_mostly = -1;
71
module_param_named(semaphores, i915_semaphores, int, 0600);
71
module_param_named(semaphores, i915_semaphores, int, 0600);
72
MODULE_PARM_DESC(semaphores,
72
MODULE_PARM_DESC(semaphores,
73
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
73
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
74
 
74
 
75
int i915_enable_rc6 __read_mostly = 0;
75
int i915_enable_rc6 __read_mostly = 0;
76
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
76
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
77
MODULE_PARM_DESC(i915_enable_rc6,
77
MODULE_PARM_DESC(i915_enable_rc6,
78
		"Enable power-saving render C-state 6. "
78
		"Enable power-saving render C-state 6. "
79
		"Different stages can be selected via bitmask values "
79
		"Different stages can be selected via bitmask values "
80
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
80
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
81
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
81
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
82
		"default: -1 (use per-chip default)");
82
		"default: -1 (use per-chip default)");
83
 
83
 
84
int i915_enable_fbc __read_mostly = 0;
84
int i915_enable_fbc __read_mostly = 0;
85
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
85
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
86
MODULE_PARM_DESC(i915_enable_fbc,
86
MODULE_PARM_DESC(i915_enable_fbc,
87
		"Enable frame buffer compression for power savings "
87
		"Enable frame buffer compression for power savings "
88
		"(default: -1 (use per-chip default))");
88
		"(default: -1 (use per-chip default))");
89
 
89
 
90
unsigned int i915_lvds_downclock  __read_mostly =  0;
90
unsigned int i915_lvds_downclock  __read_mostly =  0;
91
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
91
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
92
MODULE_PARM_DESC(lvds_downclock,
92
MODULE_PARM_DESC(lvds_downclock,
93
		"Use panel (LVDS/eDP) downclocking for power savings "
93
		"Use panel (LVDS/eDP) downclocking for power savings "
94
		"(default: false)");
94
		"(default: false)");
95
 
95
 
96
int i915_lvds_channel_mode __read_mostly;
96
int i915_lvds_channel_mode __read_mostly;
97
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
97
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
98
MODULE_PARM_DESC(lvds_channel_mode,
98
MODULE_PARM_DESC(lvds_channel_mode,
99
		 "Specify LVDS channel mode "
99
		 "Specify LVDS channel mode "
100
		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
100
		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
101
 
101
 
102
int i915_panel_use_ssc __read_mostly = -1;
102
int i915_panel_use_ssc __read_mostly = -1;
103
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
103
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
104
MODULE_PARM_DESC(lvds_use_ssc,
104
MODULE_PARM_DESC(lvds_use_ssc,
105
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
105
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
106
		"(default: auto from VBT)");
106
		"(default: auto from VBT)");
107
 
107
 
108
int i915_vbt_sdvo_panel_type __read_mostly      = -1;
108
int i915_vbt_sdvo_panel_type __read_mostly      = -1;
109
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
109
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
110
MODULE_PARM_DESC(vbt_sdvo_panel_type,
110
MODULE_PARM_DESC(vbt_sdvo_panel_type,
111
		"Override/Ignore selection of SDVO panel mode in the VBT "
111
		"Override/Ignore selection of SDVO panel mode in the VBT "
112
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
112
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
113
 
113
 
114
static bool i915_try_reset __read_mostly = true;
114
static bool i915_try_reset __read_mostly = true;
115
module_param_named(reset, i915_try_reset, bool, 0600);
115
module_param_named(reset, i915_try_reset, bool, 0600);
116
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
116
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
117
 
117
 
118
bool i915_enable_hangcheck __read_mostly = false;
118
bool i915_enable_hangcheck __read_mostly = false;
119
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
119
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
120
MODULE_PARM_DESC(enable_hangcheck,
120
MODULE_PARM_DESC(enable_hangcheck,
121
		"Periodically check GPU activity for detecting hangs. "
121
		"Periodically check GPU activity for detecting hangs. "
122
		"WARNING: Disabling this can cause system wide hangs. "
122
		"WARNING: Disabling this can cause system wide hangs. "
123
		"(default: true)");
123
		"(default: true)");
124
 
124
 
125
int i915_enable_ppgtt __read_mostly = 0;
125
int i915_enable_ppgtt __read_mostly = 0;
126
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
126
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
127
MODULE_PARM_DESC(i915_enable_ppgtt,
127
MODULE_PARM_DESC(i915_enable_ppgtt,
128
		"Enable PPGTT (default: true)");
128
		"Enable PPGTT (default: true)");
129
 
129
 
130
int i915_enable_psr __read_mostly = 0;
130
int i915_enable_psr __read_mostly = 0;
131
module_param_named(enable_psr, i915_enable_psr, int, 0600);
131
module_param_named(enable_psr, i915_enable_psr, int, 0600);
132
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
132
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
133
 
133
 
134
unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
134
unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
135
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
135
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
136
MODULE_PARM_DESC(preliminary_hw_support,
136
MODULE_PARM_DESC(preliminary_hw_support,
137
		"Enable preliminary hardware support.");
137
		"Enable preliminary hardware support.");
138
 
138
 
139
int i915_disable_power_well __read_mostly = 1;
139
int i915_disable_power_well __read_mostly = 1;
140
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
140
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
141
MODULE_PARM_DESC(disable_power_well,
141
MODULE_PARM_DESC(disable_power_well,
142
		 "Disable the power well when possible (default: true)");
142
		 "Disable the power well when possible (default: true)");
143
 
143
 
144
int i915_enable_ips __read_mostly = 1;
144
int i915_enable_ips __read_mostly = 1;
145
module_param_named(enable_ips, i915_enable_ips, int, 0600);
145
module_param_named(enable_ips, i915_enable_ips, int, 0600);
146
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
146
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
147
 
147
 
148
bool i915_fastboot __read_mostly = 0;
148
bool i915_fastboot __read_mostly = 0;
149
module_param_named(fastboot, i915_fastboot, bool, 0600);
149
module_param_named(fastboot, i915_fastboot, bool, 0600);
150
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
150
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
151
		 "(default: false)");
151
		 "(default: false)");
152
 
152
 
153
int i915_enable_pc8 __read_mostly = 0;
153
int i915_enable_pc8 __read_mostly = 0;
154
module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
154
module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
155
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
155
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
156
 
156
 
157
int i915_pc8_timeout __read_mostly = 5000;
157
int i915_pc8_timeout __read_mostly = 5000;
158
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
158
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
159
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
159
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
160
 
160
 
161
bool i915_prefault_disable __read_mostly;
161
bool i915_prefault_disable __read_mostly;
162
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
162
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
163
MODULE_PARM_DESC(prefault_disable,
163
MODULE_PARM_DESC(prefault_disable,
164
		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
164
		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
165
 
165
 
166
static struct drm_driver driver;
166
static struct drm_driver driver;
167
extern int intel_agp_enabled;
167
extern int intel_agp_enabled;
168
 
168
 
169
#define PCI_VENDOR_ID_INTEL        0x8086
169
#define PCI_VENDOR_ID_INTEL        0x8086
170
 
170
 
171
 
171
 
172
static const struct intel_device_info intel_i915g_info = {
172
static const struct intel_device_info intel_i915g_info = {
173
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
173
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
174
	.has_overlay = 1, .overlay_needs_physical = 1,
174
	.has_overlay = 1, .overlay_needs_physical = 1,
175
};
175
};
176
static const struct intel_device_info intel_i915gm_info = {
176
static const struct intel_device_info intel_i915gm_info = {
177
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
177
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
178
	.cursor_needs_physical = 1,
178
	.cursor_needs_physical = 1,
179
	.has_overlay = 1, .overlay_needs_physical = 1,
179
	.has_overlay = 1, .overlay_needs_physical = 1,
180
	.supports_tv = 1,
180
	.supports_tv = 1,
181
};
181
};
182
static const struct intel_device_info intel_i945g_info = {
182
static const struct intel_device_info intel_i945g_info = {
183
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
183
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
184
	.has_overlay = 1, .overlay_needs_physical = 1,
184
	.has_overlay = 1, .overlay_needs_physical = 1,
185
};
185
};
186
static const struct intel_device_info intel_i945gm_info = {
186
static const struct intel_device_info intel_i945gm_info = {
187
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
187
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
188
	.has_hotplug = 1, .cursor_needs_physical = 1,
188
	.has_hotplug = 1, .cursor_needs_physical = 1,
189
	.has_overlay = 1, .overlay_needs_physical = 1,
189
	.has_overlay = 1, .overlay_needs_physical = 1,
190
	.supports_tv = 1,
190
	.supports_tv = 1,
191
};
191
};
192
 
192
 
193
static const struct intel_device_info intel_i965g_info = {
193
static const struct intel_device_info intel_i965g_info = {
194
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
194
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
195
	.has_hotplug = 1,
195
	.has_hotplug = 1,
196
	.has_overlay = 1,
196
	.has_overlay = 1,
197
};
197
};
198
 
198
 
199
static const struct intel_device_info intel_i965gm_info = {
199
static const struct intel_device_info intel_i965gm_info = {
200
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
200
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
201
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
201
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
202
	.has_overlay = 1,
202
	.has_overlay = 1,
203
	.supports_tv = 1,
203
	.supports_tv = 1,
204
};
204
};
205
 
205
 
206
static const struct intel_device_info intel_g33_info = {
206
static const struct intel_device_info intel_g33_info = {
207
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
207
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
208
	.need_gfx_hws = 1, .has_hotplug = 1,
208
	.need_gfx_hws = 1, .has_hotplug = 1,
209
	.has_overlay = 1,
209
	.has_overlay = 1,
210
};
210
};
211
 
211
 
212
static const struct intel_device_info intel_g45_info = {
212
static const struct intel_device_info intel_g45_info = {
213
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
213
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
214
	.has_pipe_cxsr = 1, .has_hotplug = 1,
214
	.has_pipe_cxsr = 1, .has_hotplug = 1,
215
	.has_bsd_ring = 1,
215
	.has_bsd_ring = 1,
216
};
216
};
217
 
217
 
218
static const struct intel_device_info intel_gm45_info = {
218
static const struct intel_device_info intel_gm45_info = {
219
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
219
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
220
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
220
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
221
	.has_pipe_cxsr = 1, .has_hotplug = 1,
221
	.has_pipe_cxsr = 1, .has_hotplug = 1,
222
	.supports_tv = 1,
222
	.supports_tv = 1,
223
	.has_bsd_ring = 1,
223
	.has_bsd_ring = 1,
224
};
224
};
225
 
225
 
226
static const struct intel_device_info intel_pineview_info = {
226
static const struct intel_device_info intel_pineview_info = {
227
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
227
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
228
	.need_gfx_hws = 1, .has_hotplug = 1,
228
	.need_gfx_hws = 1, .has_hotplug = 1,
229
	.has_overlay = 1,
229
	.has_overlay = 1,
230
};
230
};
231
 
231
 
232
static const struct intel_device_info intel_ironlake_d_info = {
232
static const struct intel_device_info intel_ironlake_d_info = {
233
	.gen = 5, .num_pipes = 2,
233
	.gen = 5, .num_pipes = 2,
234
	.need_gfx_hws = 1, .has_hotplug = 1,
234
	.need_gfx_hws = 1, .has_hotplug = 1,
235
	.has_bsd_ring = 1,
235
	.has_bsd_ring = 1,
236
};
236
};
237
 
237
 
238
static const struct intel_device_info intel_ironlake_m_info = {
238
static const struct intel_device_info intel_ironlake_m_info = {
239
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
239
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
240
	.need_gfx_hws = 1, .has_hotplug = 1,
240
	.need_gfx_hws = 1, .has_hotplug = 1,
241
	.has_fbc = 1,
241
	.has_fbc = 1,
242
	.has_bsd_ring = 1,
242
	.has_bsd_ring = 1,
243
};
243
};
244
 
244
 
245
static const struct intel_device_info intel_sandybridge_d_info = {
245
static const struct intel_device_info intel_sandybridge_d_info = {
246
	.gen = 6, .num_pipes = 2,
246
	.gen = 6, .num_pipes = 2,
247
	.need_gfx_hws = 1, .has_hotplug = 1,
247
	.need_gfx_hws = 1, .has_hotplug = 1,
248
    .has_bsd_ring = 1,
248
    .has_bsd_ring = 1,
249
    .has_blt_ring = 1,
249
    .has_blt_ring = 1,
250
	.has_llc = 1,
250
	.has_llc = 1,
251
	.has_force_wake = 1,
251
	.has_force_wake = 1,
252
};
252
};
253
 
253
 
254
static const struct intel_device_info intel_sandybridge_m_info = {
254
static const struct intel_device_info intel_sandybridge_m_info = {
255
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
255
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
256
	.need_gfx_hws = 1, .has_hotplug = 1,
256
	.need_gfx_hws = 1, .has_hotplug = 1,
257
    .has_fbc      = 1,
257
    .has_fbc      = 1,
258
    .has_bsd_ring = 1,
258
    .has_bsd_ring = 1,
259
    .has_blt_ring = 1,
259
    .has_blt_ring = 1,
260
	.has_llc = 1,
260
	.has_llc = 1,
261
	.has_force_wake = 1,
261
	.has_force_wake = 1,
262
};
262
};
263
 
263
 
264
#define GEN7_FEATURES  \
264
#define GEN7_FEATURES  \
265
	.gen = 7, .num_pipes = 3, \
265
	.gen = 7, .num_pipes = 3, \
266
	.need_gfx_hws = 1, .has_hotplug = 1, \
266
	.need_gfx_hws = 1, .has_hotplug = 1, \
267
	.has_bsd_ring = 1, \
267
	.has_bsd_ring = 1, \
268
	.has_blt_ring = 1, \
268
	.has_blt_ring = 1, \
269
	.has_llc = 1, \
269
	.has_llc = 1, \
270
	.has_force_wake = 1
270
	.has_force_wake = 1
271
 
271
 
272
static const struct intel_device_info intel_ivybridge_d_info = {
272
static const struct intel_device_info intel_ivybridge_d_info = {
273
	GEN7_FEATURES,
273
	GEN7_FEATURES,
274
	.is_ivybridge = 1,
274
	.is_ivybridge = 1,
275
};
275
};
276
 
276
 
277
static const struct intel_device_info intel_ivybridge_m_info = {
277
static const struct intel_device_info intel_ivybridge_m_info = {
278
	GEN7_FEATURES,
278
	GEN7_FEATURES,
279
	.is_ivybridge = 1,
279
	.is_ivybridge = 1,
280
	.is_mobile = 1,
280
	.is_mobile = 1,
281
	.has_fbc = 1,
281
	.has_fbc = 1,
282
};
282
};
283
 
283
 
284
static const struct intel_device_info intel_ivybridge_q_info = {
284
static const struct intel_device_info intel_ivybridge_q_info = {
285
	GEN7_FEATURES,
285
	GEN7_FEATURES,
286
	.is_ivybridge = 1,
286
	.is_ivybridge = 1,
287
	.num_pipes = 0, /* legal, last one wins */
287
	.num_pipes = 0, /* legal, last one wins */
288
};
288
};
289
 
289
 
290
static const struct intel_device_info intel_valleyview_m_info = {
290
static const struct intel_device_info intel_valleyview_m_info = {
291
	GEN7_FEATURES,
291
	GEN7_FEATURES,
292
	.is_mobile = 1,
292
	.is_mobile = 1,
293
	.num_pipes = 2,
293
	.num_pipes = 2,
294
	.is_valleyview = 1,
294
	.is_valleyview = 1,
295
	.display_mmio_offset = VLV_DISPLAY_BASE,
295
	.display_mmio_offset = VLV_DISPLAY_BASE,
296
	.has_llc = 0, /* legal, last one wins */
296
	.has_llc = 0, /* legal, last one wins */
297
};
297
};
298
 
298
 
299
static const struct intel_device_info intel_valleyview_d_info = {
299
static const struct intel_device_info intel_valleyview_d_info = {
300
	GEN7_FEATURES,
300
	GEN7_FEATURES,
301
	.num_pipes = 2,
301
	.num_pipes = 2,
302
	.is_valleyview = 1,
302
	.is_valleyview = 1,
303
	.display_mmio_offset = VLV_DISPLAY_BASE,
303
	.display_mmio_offset = VLV_DISPLAY_BASE,
304
	.has_llc = 0, /* legal, last one wins */
304
	.has_llc = 0, /* legal, last one wins */
305
};
305
};
306
 
306
 
307
static const struct intel_device_info intel_haswell_d_info = {
307
static const struct intel_device_info intel_haswell_d_info = {
308
	GEN7_FEATURES,
308
	GEN7_FEATURES,
309
	.is_haswell = 1,
309
	.is_haswell = 1,
310
	.has_ddi = 1,
310
	.has_ddi = 1,
311
	.has_fpga_dbg = 1,
311
	.has_fpga_dbg = 1,
312
	.has_vebox_ring = 1,
312
	.has_vebox_ring = 1,
313
};
313
};
314
 
314
 
315
static const struct intel_device_info intel_haswell_m_info = {
315
static const struct intel_device_info intel_haswell_m_info = {
316
	GEN7_FEATURES,
316
	GEN7_FEATURES,
317
	.is_haswell = 1,
317
	.is_haswell = 1,
318
	.is_mobile = 1,
318
	.is_mobile = 1,
319
	.has_ddi = 1,
319
	.has_ddi = 1,
320
	.has_fpga_dbg = 1,
320
	.has_fpga_dbg = 1,
321
	.has_fbc = 1,
321
	.has_fbc = 1,
322
	.has_vebox_ring = 1,
322
	.has_vebox_ring = 1,
323
};
323
};
324
 
324
 
325
/*
325
/*
326
 * Make sure any device matches here are from most specific to most
326
 * Make sure any device matches here are from most specific to most
327
 * general.  For example, since the Quanta match is based on the subsystem
327
 * general.  For example, since the Quanta match is based on the subsystem
328
 * and subvendor IDs, we need it to come before the more general IVB
328
 * and subvendor IDs, we need it to come before the more general IVB
329
 * PCI ID matches, otherwise we'll use the wrong info struct above.
329
 * PCI ID matches, otherwise we'll use the wrong info struct above.
330
 */
330
 */
331
#define INTEL_PCI_IDS \
331
#define INTEL_PCI_IDS \
332
	INTEL_I915G_IDS(&intel_i915g_info),	\
332
	INTEL_I915G_IDS(&intel_i915g_info),	\
333
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
333
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
334
	INTEL_I945G_IDS(&intel_i945g_info),	\
334
	INTEL_I945G_IDS(&intel_i945g_info),	\
335
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
335
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
336
	INTEL_I965G_IDS(&intel_i965g_info),	\
336
	INTEL_I965G_IDS(&intel_i965g_info),	\
337
	INTEL_G33_IDS(&intel_g33_info),		\
337
	INTEL_G33_IDS(&intel_g33_info),		\
338
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
338
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
339
	INTEL_GM45_IDS(&intel_gm45_info), 	\
339
	INTEL_GM45_IDS(&intel_gm45_info), 	\
340
	INTEL_G45_IDS(&intel_g45_info), 	\
340
	INTEL_G45_IDS(&intel_g45_info), 	\
341
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
341
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
342
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
342
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
343
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
343
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
344
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
344
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
345
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
345
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
346
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
346
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
347
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
347
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
348
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
348
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
349
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
349
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
350
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
350
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
351
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
351
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
352
	INTEL_VLV_D_IDS(&intel_valleyview_d_info)
352
	INTEL_VLV_D_IDS(&intel_valleyview_d_info)
353
 
353
 
354
static const struct pci_device_id pciidlist[] = {       /* aka */
354
static const struct pci_device_id pciidlist[] = {       /* aka */
355
	INTEL_PCI_IDS,
355
	INTEL_PCI_IDS,
356
    {0, 0, 0}
356
    {0, 0, 0}
357
};
357
};
358
 
358
 
359
#define INTEL_PCH_DEVICE_ID_MASK        0xff00
359
#define INTEL_PCH_DEVICE_ID_MASK        0xff00
360
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
360
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
361
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
361
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
362
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
362
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
363
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
363
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
364
 
364
 
365
void intel_detect_pch(struct drm_device *dev)
365
void intel_detect_pch(struct drm_device *dev)
366
{
366
{
367
    struct drm_i915_private *dev_priv = dev->dev_private;
367
    struct drm_i915_private *dev_priv = dev->dev_private;
368
    struct pci_dev *pch;
368
    struct pci_dev *pch;
369
 
369
 
370
	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
370
	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
371
	 * (which really amounts to a PCH but no South Display).
371
	 * (which really amounts to a PCH but no South Display).
372
	 */
372
	 */
373
	if (INTEL_INFO(dev)->num_pipes == 0) {
373
	if (INTEL_INFO(dev)->num_pipes == 0) {
374
		dev_priv->pch_type = PCH_NOP;
374
		dev_priv->pch_type = PCH_NOP;
375
		return;
375
		return;
376
	}
376
	}
377
 
377
 
378
    /*
378
    /*
379
     * The reason to probe ISA bridge instead of Dev31:Fun0 is to
379
     * The reason to probe ISA bridge instead of Dev31:Fun0 is to
380
     * make graphics device passthrough work easy for VMM, that only
380
     * make graphics device passthrough work easy for VMM, that only
381
     * need to expose ISA bridge to let driver know the real hardware
381
     * need to expose ISA bridge to let driver know the real hardware
382
     * underneath. This is a requirement from virtualization team.
382
     * underneath. This is a requirement from virtualization team.
383
	 *
383
	 *
384
	 * In some virtualized environments (e.g. XEN), there is irrelevant
384
	 * In some virtualized environments (e.g. XEN), there is irrelevant
385
	 * ISA bridge in the system. To work reliably, we should scan trhough
385
	 * ISA bridge in the system. To work reliably, we should scan trhough
386
	 * all the ISA bridge devices and check for the first match, instead
386
	 * all the ISA bridge devices and check for the first match, instead
387
	 * of only checking the first one.
387
	 * of only checking the first one.
388
     */
388
     */
389
    pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
389
    pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
390
	while (pch) {
390
	while (pch) {
391
		struct pci_dev *curr = pch;
391
		struct pci_dev *curr = pch;
392
        if (pch->vendor == PCI_VENDOR_ID_INTEL) {
392
        if (pch->vendor == PCI_VENDOR_ID_INTEL) {
393
			unsigned short id;
393
			unsigned short id;
394
            id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
394
            id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
395
			dev_priv->pch_id = id;
395
			dev_priv->pch_id = id;
396
 
396
 
397
            if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
397
            if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
398
                dev_priv->pch_type = PCH_IBX;
398
                dev_priv->pch_type = PCH_IBX;
399
                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
399
                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
400
				WARN_ON(!IS_GEN5(dev));
400
				WARN_ON(!IS_GEN5(dev));
401
            } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
401
            } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
402
                dev_priv->pch_type = PCH_CPT;
402
                dev_priv->pch_type = PCH_CPT;
403
                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
403
                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
404
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
404
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
405
            } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
405
            } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
406
                /* PantherPoint is CPT compatible */
406
                /* PantherPoint is CPT compatible */
407
                dev_priv->pch_type = PCH_CPT;
407
                dev_priv->pch_type = PCH_CPT;
408
                DRM_DEBUG_KMS("Found PatherPoint PCH\n");
408
                DRM_DEBUG_KMS("Found PatherPoint PCH\n");
409
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
409
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
410
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
410
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
411
				dev_priv->pch_type = PCH_LPT;
411
				dev_priv->pch_type = PCH_LPT;
412
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
412
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
413
				WARN_ON(!IS_HASWELL(dev));
413
				WARN_ON(!IS_HASWELL(dev));
414
				WARN_ON(IS_ULT(dev));
414
				WARN_ON(IS_ULT(dev));
415
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
415
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
416
				dev_priv->pch_type = PCH_LPT;
416
				dev_priv->pch_type = PCH_LPT;
417
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
417
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
418
				WARN_ON(!IS_HASWELL(dev));
418
				WARN_ON(!IS_HASWELL(dev));
419
				WARN_ON(!IS_ULT(dev));
419
				WARN_ON(!IS_ULT(dev));
420
			} else {
420
			} else {
421
				goto check_next;
421
				goto check_next;
422
            }
422
            }
423
			break;
423
			break;
424
        }
424
        }
425
check_next:
425
check_next:
426
		pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
426
		pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
427
//       pci_dev_put(curr);
427
//       pci_dev_put(curr);
428
    }
428
    }
429
	if (!pch)
429
	if (!pch)
430
		DRM_DEBUG_KMS("No PCH found?\n");
430
		DRM_DEBUG_KMS("No PCH found?\n");
431
}
431
}
432
 
432
 
433
bool i915_semaphore_is_enabled(struct drm_device *dev)
433
bool i915_semaphore_is_enabled(struct drm_device *dev)
434
{
434
{
435
	if (INTEL_INFO(dev)->gen < 6)
435
	if (INTEL_INFO(dev)->gen < 6)
436
		return 0;
436
		return 0;
437
 
437
 
438
	if (i915_semaphores >= 0)
438
	if (i915_semaphores >= 0)
439
		return i915_semaphores;
439
		return i915_semaphores;
440
 
440
 
441
#ifdef CONFIG_INTEL_IOMMU
441
#ifdef CONFIG_INTEL_IOMMU
442
	/* Enable semaphores on SNB when IO remapping is off */
442
	/* Enable semaphores on SNB when IO remapping is off */
443
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
443
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
444
		return false;
444
		return false;
445
#endif
445
#endif
446
 
446
 
447
	return 1;
447
	return 1;
448
}
448
}
449
 
449
 
450
#if 0
450
#if 0
451
static int i915_drm_freeze(struct drm_device *dev)
451
static int i915_drm_freeze(struct drm_device *dev)
452
{
452
{
453
	struct drm_i915_private *dev_priv = dev->dev_private;
453
	struct drm_i915_private *dev_priv = dev->dev_private;
454
	struct drm_crtc *crtc;
454
	struct drm_crtc *crtc;
455
 
455
 
456
	/* ignore lid events during suspend */
456
	/* ignore lid events during suspend */
457
	mutex_lock(&dev_priv->modeset_restore_lock);
457
	mutex_lock(&dev_priv->modeset_restore_lock);
458
	dev_priv->modeset_restore = MODESET_SUSPENDED;
458
	dev_priv->modeset_restore = MODESET_SUSPENDED;
459
	mutex_unlock(&dev_priv->modeset_restore_lock);
459
	mutex_unlock(&dev_priv->modeset_restore_lock);
460
 
460
 
461
	/* We do a lot of poking in a lot of registers, make sure they work
461
	/* We do a lot of poking in a lot of registers, make sure they work
462
	 * properly. */
462
	 * properly. */
463
	hsw_disable_package_c8(dev_priv);
463
	hsw_disable_package_c8(dev_priv);
464
	intel_set_power_well(dev, true);
464
	intel_set_power_well(dev, true);
465
 
465
 
466
	drm_kms_helper_poll_disable(dev);
466
	drm_kms_helper_poll_disable(dev);
467
 
467
 
468
	pci_save_state(dev->pdev);
468
	pci_save_state(dev->pdev);
469
 
469
 
470
	/* If KMS is active, we do the leavevt stuff here */
470
	/* If KMS is active, we do the leavevt stuff here */
471
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
471
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
472
		int error;
472
		int error;
473
 
473
 
474
		mutex_lock(&dev->struct_mutex);
474
		mutex_lock(&dev->struct_mutex);
475
		error = i915_gem_idle(dev);
475
		error = i915_gem_idle(dev);
476
		mutex_unlock(&dev->struct_mutex);
476
		mutex_unlock(&dev->struct_mutex);
477
		if (error) {
477
		if (error) {
478
			dev_err(&dev->pdev->dev,
478
			dev_err(&dev->pdev->dev,
479
				"GEM idle failed, resume might fail\n");
479
				"GEM idle failed, resume might fail\n");
480
			return error;
480
			return error;
481
		}
481
		}
482
 
482
 
483
		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
483
		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
484
 
484
 
485
		drm_irq_uninstall(dev);
485
		drm_irq_uninstall(dev);
486
		dev_priv->enable_hotplug_processing = false;
486
		dev_priv->enable_hotplug_processing = false;
487
		/*
487
		/*
488
		 * Disable CRTCs directly since we want to preserve sw state
488
		 * Disable CRTCs directly since we want to preserve sw state
489
		 * for _thaw.
489
		 * for _thaw.
490
		 */
490
		 */
491
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
491
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
492
			dev_priv->display.crtc_disable(crtc);
492
			dev_priv->display.crtc_disable(crtc);
493
 
493
 
494
		intel_modeset_suspend_hw(dev);
494
		intel_modeset_suspend_hw(dev);
495
	}
495
	}
496
 
496
 
497
	i915_save_state(dev);
497
	i915_save_state(dev);
498
 
498
 
499
	intel_opregion_fini(dev);
499
	intel_opregion_fini(dev);
500
 
500
 
501
	console_lock();
501
	console_lock();
502
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
502
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
503
	console_unlock();
503
	console_unlock();
504
 
504
 
505
	return 0;
505
	return 0;
506
}
506
}
507
 
507
 
508
/* Legacy (non-dev_pm_ops) suspend entry point.
 *
 * Validates that the DRM device is initialized, skips PRETHAW events and
 * devices already switched off by vga_switcheroo, freezes the driver and,
 * for a real PM_EVENT_SUSPEND, powers the PCI device down to D3hot.
 * Returns 0 on success, -ENODEV if DRM is not initialized, or the error
 * from i915_drm_freeze(). */
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int ret;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	/* Nothing to do when we are about to be thawed again. */
	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	/* Already powered off via vga_switcheroo. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_freeze(dev);
	if (ret)
		return ret;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}
537
 
537
 
538
void intel_console_resume(struct work_struct *work)
538
void intel_console_resume(struct work_struct *work)
539
{
539
{
540
	struct drm_i915_private *dev_priv =
540
	struct drm_i915_private *dev_priv =
541
		container_of(work, struct drm_i915_private,
541
		container_of(work, struct drm_i915_private,
542
			     console_resume_work);
542
			     console_resume_work);
543
	struct drm_device *dev = dev_priv->dev;
543
	struct drm_device *dev = dev_priv->dev;
544
 
544
 
545
	console_lock();
545
	console_lock();
546
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
546
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
547
	console_unlock();
547
	console_unlock();
548
}
548
}
549
 
549
 
550
static void intel_resume_hotplug(struct drm_device *dev)
550
static void intel_resume_hotplug(struct drm_device *dev)
551
{
551
{
552
	struct drm_mode_config *mode_config = &dev->mode_config;
552
	struct drm_mode_config *mode_config = &dev->mode_config;
553
	struct intel_encoder *encoder;
553
	struct intel_encoder *encoder;
554
 
554
 
555
	mutex_lock(&mode_config->mutex);
555
	mutex_lock(&mode_config->mutex);
556
	DRM_DEBUG_KMS("running encoder hotplug functions\n");
556
	DRM_DEBUG_KMS("running encoder hotplug functions\n");
557
 
557
 
558
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
558
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
559
		if (encoder->hot_plug)
559
		if (encoder->hot_plug)
560
			encoder->hot_plug(encoder);
560
			encoder->hot_plug(encoder);
561
 
561
 
562
	mutex_unlock(&mode_config->mutex);
562
	mutex_unlock(&mode_config->mutex);
563
 
563
 
564
	/* Just fire off a uevent and let userspace tell us what to do */
564
	/* Just fire off a uevent and let userspace tell us what to do */
565
	drm_helper_hpd_irq_event(dev);
565
	drm_helper_hpd_irq_event(dev);
566
}
566
}
567
 
567
 
568
static int __i915_drm_thaw(struct drm_device *dev)
568
static int __i915_drm_thaw(struct drm_device *dev)
569
{
569
{
570
	struct drm_i915_private *dev_priv = dev->dev_private;
570
	struct drm_i915_private *dev_priv = dev->dev_private;
571
	int error = 0;
571
	int error = 0;
572
 
572
 
573
	i915_restore_state(dev);
573
	i915_restore_state(dev);
574
	intel_opregion_setup(dev);
574
	intel_opregion_setup(dev);
575
 
575
 
576
	/* KMS EnterVT equivalent */
576
	/* KMS EnterVT equivalent */
577
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
577
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
578
		intel_init_pch_refclk(dev);
578
		intel_init_pch_refclk(dev);
579
 
579
 
580
		mutex_lock(&dev->struct_mutex);
580
		mutex_lock(&dev->struct_mutex);
581
 
581
 
582
		error = i915_gem_init_hw(dev);
582
		error = i915_gem_init_hw(dev);
583
		mutex_unlock(&dev->struct_mutex);
583
		mutex_unlock(&dev->struct_mutex);
584
 
584
 
585
		/* We need working interrupts for modeset enabling ... */
585
		/* We need working interrupts for modeset enabling ... */
586
		drm_irq_install(dev);
586
		drm_irq_install(dev);
587
 
587
 
588
		intel_modeset_init_hw(dev);
588
		intel_modeset_init_hw(dev);
589
 
589
 
590
		drm_modeset_lock_all(dev);
590
		drm_modeset_lock_all(dev);
591
		intel_modeset_setup_hw_state(dev, true);
591
		intel_modeset_setup_hw_state(dev, true);
592
		drm_modeset_unlock_all(dev);
592
		drm_modeset_unlock_all(dev);
593
 
593
 
594
		/*
594
		/*
595
		 * ... but also need to make sure that hotplug processing
595
		 * ... but also need to make sure that hotplug processing
596
		 * doesn't cause havoc. Like in the driver load code we don't
596
		 * doesn't cause havoc. Like in the driver load code we don't
597
		 * bother with the tiny race here where we might loose hotplug
597
		 * bother with the tiny race here where we might loose hotplug
598
		 * notifications.
598
		 * notifications.
599
		 * */
599
		 * */
600
		intel_hpd_init(dev);
600
		intel_hpd_init(dev);
601
		dev_priv->enable_hotplug_processing = true;
601
		dev_priv->enable_hotplug_processing = true;
602
		/* Config may have changed between suspend and resume */
602
		/* Config may have changed between suspend and resume */
603
		intel_resume_hotplug(dev);
603
		intel_resume_hotplug(dev);
604
	}
604
	}
605
 
605
 
606
	intel_opregion_init(dev);
606
	intel_opregion_init(dev);
607
 
607
 
608
	/*
608
	/*
609
	 * The console lock can be pretty contented on resume due
609
	 * The console lock can be pretty contented on resume due
610
	 * to all the printk activity.  Try to keep it out of the hot
610
	 * to all the printk activity.  Try to keep it out of the hot
611
	 * path of resume if possible.
611
	 * path of resume if possible.
612
	 */
612
	 */
613
	if (console_trylock()) {
613
	if (console_trylock()) {
614
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
614
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
615
		console_unlock();
615
		console_unlock();
616
	} else {
616
	} else {
617
		schedule_work(&dev_priv->console_resume_work);
617
		schedule_work(&dev_priv->console_resume_work);
618
	}
618
	}
619
 
619
 
620
	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
620
	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
621
	 * expected level. */
621
	 * expected level. */
622
	hsw_enable_package_c8(dev_priv);
622
	hsw_enable_package_c8(dev_priv);
623
 
623
 
624
	mutex_lock(&dev_priv->modeset_restore_lock);
624
	mutex_lock(&dev_priv->modeset_restore_lock);
625
	dev_priv->modeset_restore = MODESET_DONE;
625
	dev_priv->modeset_restore = MODESET_DONE;
626
	mutex_unlock(&dev_priv->modeset_restore_lock);
626
	mutex_unlock(&dev_priv->modeset_restore_lock);
627
	return error;
627
	return error;
628
}
628
}
629
 
629
 
630
static int i915_drm_thaw(struct drm_device *dev)
630
static int i915_drm_thaw(struct drm_device *dev)
631
{
631
{
632
	int error = 0;
632
	int error = 0;
633
 
633
 
634
	intel_uncore_sanitize(dev);
634
	intel_uncore_sanitize(dev);
635
 
635
 
636
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
636
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
637
		mutex_lock(&dev->struct_mutex);
637
		mutex_lock(&dev->struct_mutex);
638
		i915_gem_restore_gtt_mappings(dev);
638
		i915_gem_restore_gtt_mappings(dev);
639
		mutex_unlock(&dev->struct_mutex);
639
		mutex_unlock(&dev->struct_mutex);
640
	} else if (drm_core_check_feature(dev, DRIVER_MODESET))
640
	} else if (drm_core_check_feature(dev, DRIVER_MODESET))
641
		i915_check_and_clear_faults(dev);
641
		i915_check_and_clear_faults(dev);
642
 
642
 
643
	__i915_drm_thaw(dev);
643
	__i915_drm_thaw(dev);
644
 
644
 
645
	return error;
645
	return error;
646
}
646
}
647
 
647
 
648
int i915_resume(struct drm_device *dev)
648
int i915_resume(struct drm_device *dev)
649
{
649
{
650
	struct drm_i915_private *dev_priv = dev->dev_private;
650
	struct drm_i915_private *dev_priv = dev->dev_private;
651
	int ret;
651
	int ret;
652
 
652
 
653
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
653
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
654
		return 0;
654
		return 0;
655
 
655
 
656
	if (pci_enable_device(dev->pdev))
656
	if (pci_enable_device(dev->pdev))
657
		return -EIO;
657
		return -EIO;
658
 
658
 
659
	pci_set_master(dev->pdev);
659
	pci_set_master(dev->pdev);
660
 
660
 
661
	intel_uncore_sanitize(dev);
661
	intel_uncore_sanitize(dev);
662
 
662
 
663
	/*
663
	/*
664
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
664
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
665
	 * earlier) need this since the BIOS might clear all our scratch PTEs.
665
	 * earlier) need this since the BIOS might clear all our scratch PTEs.
666
	 */
666
	 */
667
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
667
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
668
	    !dev_priv->opregion.header) {
668
	    !dev_priv->opregion.header) {
669
		mutex_lock(&dev->struct_mutex);
669
		mutex_lock(&dev->struct_mutex);
670
		i915_gem_restore_gtt_mappings(dev);
670
		i915_gem_restore_gtt_mappings(dev);
671
		mutex_unlock(&dev->struct_mutex);
671
		mutex_unlock(&dev->struct_mutex);
672
	}
672
	}
673
 
673
 
674
	ret = __i915_drm_thaw(dev);
674
	ret = __i915_drm_thaw(dev);
675
	if (ret)
675
	if (ret)
676
		return ret;
676
		return ret;
677
 
677
 
678
	drm_kms_helper_poll_enable(dev);
678
	drm_kms_helper_poll_enable(dev);
679
	return 0;
679
	return 0;
680
}
680
}
681
 
681
 
682
/**
682
/**
683
 * i915_reset - reset chip after a hang
683
 * i915_reset - reset chip after a hang
684
 * @dev: drm device to reset
684
 * @dev: drm device to reset
685
 *
685
 *
686
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
686
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
687
 * reset or otherwise an error code.
687
 * reset or otherwise an error code.
688
 *
688
 *
689
 * Procedure is fairly simple:
689
 * Procedure is fairly simple:
690
 *   - reset the chip using the reset reg
690
 *   - reset the chip using the reset reg
691
 *   - re-init context state
691
 *   - re-init context state
692
 *   - re-init hardware status page
692
 *   - re-init hardware status page
693
 *   - re-init ring buffer
693
 *   - re-init ring buffer
694
 *   - re-init interrupt state
694
 *   - re-init interrupt state
695
 *   - re-init display
695
 *   - re-init display
696
 */
696
 */
697
int i915_reset(struct drm_device *dev)
697
int i915_reset(struct drm_device *dev)
698
{
698
{
699
	drm_i915_private_t *dev_priv = dev->dev_private;
699
	drm_i915_private_t *dev_priv = dev->dev_private;
700
	bool simulated;
700
	bool simulated;
701
	int ret;
701
	int ret;
702
 
702
 
703
	if (!i915_try_reset)
703
	if (!i915_try_reset)
704
		return 0;
704
		return 0;
705
 
705
 
706
	mutex_lock(&dev->struct_mutex);
706
	mutex_lock(&dev->struct_mutex);
707
 
707
 
708
	i915_gem_reset(dev);
708
	i915_gem_reset(dev);
709
 
709
 
710
	simulated = dev_priv->gpu_error.stop_rings != 0;
710
	simulated = dev_priv->gpu_error.stop_rings != 0;
711
 
711
 
712
	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
712
	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
713
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
713
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
714
		ret = -ENODEV;
714
		ret = -ENODEV;
715
	} else {
715
	} else {
716
		ret = intel_gpu_reset(dev);
716
		ret = intel_gpu_reset(dev);
717
 
717
 
718
		/* Also reset the gpu hangman. */
718
		/* Also reset the gpu hangman. */
719
		if (simulated) {
719
		if (simulated) {
720
			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
720
			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
721
			dev_priv->gpu_error.stop_rings = 0;
721
			dev_priv->gpu_error.stop_rings = 0;
722
			if (ret == -ENODEV) {
722
			if (ret == -ENODEV) {
723
				DRM_ERROR("Reset not implemented, but ignoring "
723
				DRM_ERROR("Reset not implemented, but ignoring "
724
					  "error for simulated gpu hangs\n");
724
					  "error for simulated gpu hangs\n");
725
				ret = 0;
725
				ret = 0;
726
			}
726
			}
727
		} else
727
		} else
728
			dev_priv->gpu_error.last_reset = get_seconds();
728
			dev_priv->gpu_error.last_reset = get_seconds();
729
	}
729
	}
730
	if (ret) {
730
	if (ret) {
731
		DRM_ERROR("Failed to reset chip.\n");
731
		DRM_ERROR("Failed to reset chip.\n");
732
		mutex_unlock(&dev->struct_mutex);
732
		mutex_unlock(&dev->struct_mutex);
733
		return ret;
733
		return ret;
734
	}
734
	}
735
 
735
 
736
	/* Ok, now get things going again... */
736
	/* Ok, now get things going again... */
737
 
737
 
738
	/*
738
	/*
739
	 * Everything depends on having the GTT running, so we need to start
739
	 * Everything depends on having the GTT running, so we need to start
740
	 * there.  Fortunately we don't need to do this unless we reset the
740
	 * there.  Fortunately we don't need to do this unless we reset the
741
	 * chip at a PCI level.
741
	 * chip at a PCI level.
742
	 *
742
	 *
743
	 * Next we need to restore the context, but we don't use those
743
	 * Next we need to restore the context, but we don't use those
744
	 * yet either...
744
	 * yet either...
745
	 *
745
	 *
746
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
746
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
747
	 * was running at the time of the reset (i.e. we weren't VT
747
	 * was running at the time of the reset (i.e. we weren't VT
748
	 * switched away).
748
	 * switched away).
749
	 */
749
	 */
750
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
750
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
751
			!dev_priv->ums.mm_suspended) {
751
			!dev_priv->ums.mm_suspended) {
752
		struct intel_ring_buffer *ring;
752
		struct intel_ring_buffer *ring;
753
		int i;
753
		int i;
754
 
754
 
755
		dev_priv->ums.mm_suspended = 0;
755
		dev_priv->ums.mm_suspended = 0;
756
 
756
 
757
		i915_gem_init_swizzling(dev);
757
		i915_gem_init_swizzling(dev);
758
 
758
 
759
		for_each_ring(ring, dev_priv, i)
759
		for_each_ring(ring, dev_priv, i)
760
			ring->init(ring);
760
			ring->init(ring);
761
 
761
 
762
		i915_gem_context_init(dev);
762
		i915_gem_context_init(dev);
763
		if (dev_priv->mm.aliasing_ppgtt) {
763
		if (dev_priv->mm.aliasing_ppgtt) {
764
			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
764
			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
765
			if (ret)
765
			if (ret)
766
				i915_gem_cleanup_aliasing_ppgtt(dev);
766
				i915_gem_cleanup_aliasing_ppgtt(dev);
767
		}
767
		}
768
 
768
 
769
		/*
769
		/*
770
		 * It would make sense to re-init all the other hw state, at
770
		 * It would make sense to re-init all the other hw state, at
771
		 * least the rps/rc6/emon init done within modeset_init_hw. For
771
		 * least the rps/rc6/emon init done within modeset_init_hw. For
772
		 * some unknown reason, this blows up my ilk, so don't.
772
		 * some unknown reason, this blows up my ilk, so don't.
773
		 */
773
		 */
774
 
774
 
775
		mutex_unlock(&dev->struct_mutex);
775
		mutex_unlock(&dev->struct_mutex);
776
 
776
 
777
		drm_irq_uninstall(dev);
777
		drm_irq_uninstall(dev);
778
		drm_irq_install(dev);
778
		drm_irq_install(dev);
779
		intel_hpd_init(dev);
779
		intel_hpd_init(dev);
780
	} else {
780
	} else {
781
		mutex_unlock(&dev->struct_mutex);
781
		mutex_unlock(&dev->struct_mutex);
782
	}
782
	}
783
 
783
 
784
	return 0;
784
	return 0;
785
}
785
}
786
 
786
 
787
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
787
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
788
{
788
{
789
	struct intel_device_info *intel_info =
789
	struct intel_device_info *intel_info =
790
		(struct intel_device_info *) ent->driver_data;
790
		(struct intel_device_info *) ent->driver_data;
791
 
791
 
792
	/* Only bind to function 0 of the device. Early generations
792
	/* Only bind to function 0 of the device. Early generations
793
	 * used function 1 as a placeholder for multi-head. This causes
793
	 * used function 1 as a placeholder for multi-head. This causes
794
	 * us confusion instead, especially on the systems where both
794
	 * us confusion instead, especially on the systems where both
795
	 * functions have the same PCI-ID!
795
	 * functions have the same PCI-ID!
796
	 */
796
	 */
797
	if (PCI_FUNC(pdev->devfn))
797
	if (PCI_FUNC(pdev->devfn))
798
		return -ENODEV;
798
		return -ENODEV;
799
 
799
 
800
	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
800
	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
801
	 * implementation for gen3 (and only gen3) that used legacy drm maps
801
	 * implementation for gen3 (and only gen3) that used legacy drm maps
802
	 * (gasp!) to share buffers between X and the client. Hence we need to
802
	 * (gasp!) to share buffers between X and the client. Hence we need to
803
	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
803
	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
804
	if (intel_info->gen != 3) {
804
	if (intel_info->gen != 3) {
805
		driver.driver_features &=
805
		driver.driver_features &=
806
			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
806
			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
807
	} else if (!intel_agp_enabled) {
807
	} else if (!intel_agp_enabled) {
808
		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
808
		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
809
		return -ENODEV;
809
		return -ENODEV;
810
	}
810
	}
811
 
811
 
812
	return drm_get_pci_dev(pdev, ent, &driver);
812
	return drm_get_pci_dev(pdev, ent, &driver);
813
}
813
}
814
 
814
 
815
/* PCI remove callback: drop the DRM device associated with this PCI
 * device. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
822
 
822
 
823
static int i915_pm_suspend(struct device *dev)
823
static int i915_pm_suspend(struct device *dev)
824
{
824
{
825
	struct pci_dev *pdev = to_pci_dev(dev);
825
	struct pci_dev *pdev = to_pci_dev(dev);
826
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
826
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
827
	int error;
827
	int error;
828
 
828
 
829
	if (!drm_dev || !drm_dev->dev_private) {
829
	if (!drm_dev || !drm_dev->dev_private) {
830
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
830
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
831
		return -ENODEV;
831
		return -ENODEV;
832
	}
832
	}
833
 
833
 
834
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
834
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
835
		return 0;
835
		return 0;
836
 
836
 
837
	error = i915_drm_freeze(drm_dev);
837
	error = i915_drm_freeze(drm_dev);
838
	if (error)
838
	if (error)
839
		return error;
839
		return error;
840
 
840
 
841
	pci_disable_device(pdev);
841
	pci_disable_device(pdev);
842
	pci_set_power_state(pdev, PCI_D3hot);
842
	pci_set_power_state(pdev, PCI_D3hot);
843
 
843
 
844
	return 0;
844
	return 0;
845
}
845
}
846
 
846
 
847
/* dev_pm_ops .resume callback: thin wrapper around i915_resume(). */
static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}
854
 
854
 
855
static int i915_pm_freeze(struct device *dev)
855
static int i915_pm_freeze(struct device *dev)
856
{
856
{
857
	struct pci_dev *pdev = to_pci_dev(dev);
857
	struct pci_dev *pdev = to_pci_dev(dev);
858
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
858
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
859
 
859
 
860
	if (!drm_dev || !drm_dev->dev_private) {
860
	if (!drm_dev || !drm_dev->dev_private) {
861
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
861
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
862
		return -ENODEV;
862
		return -ENODEV;
863
	}
863
	}
864
 
864
 
865
	return i915_drm_freeze(drm_dev);
865
	return i915_drm_freeze(drm_dev);
866
}
866
}
867
 
867
 
868
/* dev_pm_ops .thaw callback: thin wrapper around i915_drm_thaw(). */
static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}
875
 
875
 
876
/* dev_pm_ops .poweroff callback: freeze the driver before the system
 * powers off for hibernation. */
static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}
883
 
883
 
884
#endif
884
#endif
885
 
885
 
886
static struct drm_driver driver = {
886
static struct drm_driver driver = {
887
    /* Don't use MTRRs here; the Xserver or userspace app should
887
    /* Don't use MTRRs here; the Xserver or userspace app should
888
     * deal with them for Intel hardware.
888
     * deal with them for Intel hardware.
889
     */
889
     */
890
    .driver_features =
890
    .driver_features =
891
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
891
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
892
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
892
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
893
	    DRIVER_RENDER,
893
	    DRIVER_RENDER,
894
    .load = i915_driver_load,
894
    .load = i915_driver_load,
895
//    .unload = i915_driver_unload,
895
//    .unload = i915_driver_unload,
896
      .open = i915_driver_open,
896
      .open = i915_driver_open,
897
//    .lastclose = i915_driver_lastclose,
897
//    .lastclose = i915_driver_lastclose,
898
//    .preclose = i915_driver_preclose,
898
//    .preclose = i915_driver_preclose,
899
//    .postclose = i915_driver_postclose,
899
//    .postclose = i915_driver_postclose,
900
 
900
 
901
    /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
901
    /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
902
//    .suspend = i915_suspend,
902
//    .suspend = i915_suspend,
903
//    .resume = i915_resume,
903
//    .resume = i915_resume,
904
 
904
 
905
//    .device_is_agp = i915_driver_device_is_agp,
905
//    .device_is_agp = i915_driver_device_is_agp,
906
//    .master_create = i915_master_create,
906
//    .master_create = i915_master_create,
907
//    .master_destroy = i915_master_destroy,
907
//    .master_destroy = i915_master_destroy,
908
#if defined(CONFIG_DEBUG_FS)
908
#if defined(CONFIG_DEBUG_FS)
909
	.debugfs_init = i915_debugfs_init,
909
	.debugfs_init = i915_debugfs_init,
910
	.debugfs_cleanup = i915_debugfs_cleanup,
910
	.debugfs_cleanup = i915_debugfs_cleanup,
911
#endif
911
#endif
912
    .gem_init_object = i915_gem_init_object,
912
    .gem_init_object = i915_gem_init_object,
913
    .gem_free_object = i915_gem_free_object,
913
    .gem_free_object = i915_gem_free_object,
914
 
914
 
915
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
915
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
916
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
916
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
917
//    .gem_prime_export = i915_gem_prime_export,
917
//    .gem_prime_export = i915_gem_prime_export,
918
//    .gem_prime_import = i915_gem_prime_import,
918
//    .gem_prime_import = i915_gem_prime_import,
919
 
919
 
920
//    .dumb_create = i915_gem_dumb_create,
920
//    .dumb_create = i915_gem_dumb_create,
921
//    .dumb_map_offset = i915_gem_mmap_gtt,
921
//    .dumb_map_offset = i915_gem_mmap_gtt,
922
//    .dumb_destroy = i915_gem_dumb_destroy,
922
//    .dumb_destroy = i915_gem_dumb_destroy,
923
//    .ioctls = i915_ioctls,
923
//    .ioctls = i915_ioctls,
924
//    .fops = &i915_driver_fops,
924
//    .fops = &i915_driver_fops,
925
//    .name = DRIVER_NAME,
925
//    .name = DRIVER_NAME,
926
//    .desc = DRIVER_DESC,
926
//    .desc = DRIVER_DESC,
927
//    .date = DRIVER_DATE,
927
//    .date = DRIVER_DATE,
928
//    .major = DRIVER_MAJOR,
928
//    .major = DRIVER_MAJOR,
929
//    .minor = DRIVER_MINOR,
929
//    .minor = DRIVER_MINOR,
930
//    .patchlevel = DRIVER_PATCHLEVEL,
930
//    .patchlevel = DRIVER_PATCHLEVEL,
931
};
931
};
932
 
932
 
933
 
933
 
934
 
934
 
935
 
935
 
936
int i915_init(void)
936
int i915_init(void)
937
{
937
{
938
    static pci_dev_t device;
938
    static pci_dev_t device;
939
    const struct pci_device_id  *ent;
939
    const struct pci_device_id  *ent;
940
    int  err;
940
    int  err;
941
 
941
 
942
    ent = find_pci_device(&device, pciidlist);
942
    ent = find_pci_device(&device, pciidlist);
943
    if( unlikely(ent == NULL) )
943
    if( unlikely(ent == NULL) )
944
    {
944
    {
945
        dbgprintf("device not found\n");
945
        dbgprintf("device not found\n");
946
        return -ENODEV;
946
        return -ENODEV;
947
    };
947
    };
948
 
948
 
949
    drm_core_init();
949
    drm_core_init();
950
 
950
 
951
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
951
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
952
                                device.pci_dev.device);
952
                                device.pci_dev.device);
953
 
953
 
954
    driver.driver_features |= DRIVER_MODESET;
954
    driver.driver_features |= DRIVER_MODESET;
955
 
955
 
956
    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
956
    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
957
 
957
 
958
    return err;
958
    return err;
959
}
959
}
960
 
960
961
 
-
 
962
/* We give fast paths for the really cool registers */
-
 
963
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-
 
964
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
-
 
965
	 ((reg) < 0x40000) &&            \
-
 
966
	 ((reg) != FORCEWAKE))
-
 
967
 
-
 
968
static bool IS_DISPLAYREG(u32 reg)
-
 
969
{
-
 
970
	/*
-
 
971
	 * This should make it easier to transition modules over to the
-
 
972
	 * new register block scheme, since we can do it incrementally.
-
 
973
	 */
-
 
974
	if (reg >= VLV_DISPLAY_BASE)
-
 
975
		return false;
-
 
976
 
-
 
977
	if (reg >= RENDER_RING_BASE &&
-
 
978
	    reg < RENDER_RING_BASE + 0xff)
-
 
979
		return false;
-
 
980
	if (reg >= GEN6_BSD_RING_BASE &&
-
 
981
	    reg < GEN6_BSD_RING_BASE + 0xff)
-
 
982
		return false;
-
 
983
	if (reg >= BLT_RING_BASE &&
-
 
984
	    reg < BLT_RING_BASE + 0xff)
-
 
985
		return false;
-
 
986
 
-
 
987
	if (reg == PGTBL_ER)
-
 
988
		return false;
-
 
989
 
-
 
990
	if (reg >= IPEIR_I965 &&
-
 
991
	    reg < HWSTAM)
-
 
992
		return false;
-
 
993
 
-
 
994
	if (reg == MI_MODE)
-
 
995
		return false;
-
 
996
 
-
 
997
	if (reg == GFX_MODE_GEN7)
-
 
998
		return false;
-
 
999
 
-
 
1000
	if (reg == RENDER_HWS_PGA_GEN7 ||
-
 
1001
	    reg == BSD_HWS_PGA_GEN7 ||
-
 
1002
	    reg == BLT_HWS_PGA_GEN7)
-
 
1003
		return false;
-
 
1004
 
-
 
1005
	if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
-
 
1006
	    reg == GEN6_BSD_RNCID)
-
 
1007
		return false;
-
 
1008
 
-
 
1009
	if (reg == GEN6_BLITTER_ECOSKPD)
-
 
1010
		return false;
-
 
1011
 
-
 
1012
	if (reg >= 0x4000c &&
-
 
1013
	    reg <= 0x4002c)
-
 
1014
		return false;
-
 
1015
 
-
 
1016
	if (reg >= 0x4f000 &&
-
 
1017
	    reg <= 0x4f08f)
-
 
1018
		return false;
-
 
1019
 
-
 
1020
	if (reg >= 0x4f100 &&
-
 
1021
	    reg <= 0x4f11f)
-
 
1022
		return false;
-
 
1023
 
-
 
1024
	if (reg >= VLV_MASTER_IER &&
-
 
1025
	    reg <= GEN6_PMIER)
-
 
1026
		return false;
-
 
1027
 
-
 
1028
	if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
-
 
1029
	    reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
-
 
1030
		return false;
-
 
1031
 
-
 
1032
	if (reg >= VLV_IIR_RW &&
-
 
1033
	    reg <= VLV_ISR)
-
 
1034
		return false;
-
 
1035
 
-
 
1036
	if (reg == FORCEWAKE_VLV ||
-
 
1037
	    reg == FORCEWAKE_ACK_VLV)
-
 
1038
		return false;
-
 
1039
 
-
 
1040
	if (reg == GEN6_GDRST)
-
 
1041
		return false;
-
 
1042
 
-
 
1043
	switch (reg) {
-
 
1044
	case _3D_CHICKEN3:
-
 
1045
	case IVB_CHICKEN3:
-
 
1046
	case GEN7_COMMON_SLICE_CHICKEN1:
-
 
1047
	case GEN7_L3CNTLREG1:
-
 
1048
	case GEN7_L3_CHICKEN_MODE_REGISTER:
-
 
1049
	case GEN7_ROW_CHICKEN2:
-
 
1050
	case GEN7_L3SQCREG4:
-
 
1051
	case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
-
 
1052
	case GEN7_HALF_SLICE_CHICKEN1:
-
 
1053
	case GEN6_MBCTL:
-
 
1054
	case GEN6_UCGCTL2:
-
 
1055
		return false;
-
 
1056
	default:
-
 
1057
		break;
-
 
1058
	}
-
 
1059
 
-
 
1060
	return true;
-
 
1061
}
-
 
1062
 
-
 
1063
/* We give fast paths for the really cool registers */
-
 
1064
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-
 
1065
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
-
 
1066
	 ((reg) < 0x40000) &&            \
-
 
1067
	 ((reg) != FORCEWAKE))
-
 
1068
static void
-
 
1069
ilk_dummy_write(struct drm_i915_private *dev_priv)
-
 
1070
{
-
 
1071
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
-
 
1072
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
-
 
1073
	 * hence harmless to write 0 into. */
-
 
1074
	I915_WRITE_NOTRACE(MI_MODE, 0);
-
 
1075
}
-
 
1076
 
-
 
1077
static void
-
 
1078
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
-
 
1079
{
-
 
1080
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-
 
1081
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-
 
1082
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
-
 
1083
			  reg);
-
 
1084
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-
 
1085
	}
-
 
1086
}
-
 
1087
 
-
 
1088
static void
-
 
1089
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
-
 
1090
{
-
 
1091
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-
 
1092
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-
 
1093
		DRM_ERROR("Unclaimed write to %x\n", reg);
-
 
1094
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-
 
1095
	}
-
 
1096
}
-