Subversion Repositories — KolibriOS

i915_drv.c — diff of Rev 4560 against Rev 5060:
1
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
1
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2
 */
2
 */
3
/*
3
/*
4
 *
4
 *
5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6
 * All Rights Reserved.
6
 * All Rights Reserved.
7
 *
7
 *
8
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * Permission is hereby granted, free of charge, to any person obtaining a
9
 * copy of this software and associated documentation files (the
9
 * copy of this software and associated documentation files (the
10
 * "Software"), to deal in the Software without restriction, including
10
 * "Software"), to deal in the Software without restriction, including
11
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * without limitation the rights to use, copy, modify, merge, publish,
12
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * distribute, sub license, and/or sell copies of the Software, and to
13
 * permit persons to whom the Software is furnished to do so, subject to
13
 * permit persons to whom the Software is furnished to do so, subject to
14
 * the following conditions:
14
 * the following conditions:
15
 *
15
 *
16
 * The above copyright notice and this permission notice (including the
16
 * The above copyright notice and this permission notice (including the
17
 * next paragraph) shall be included in all copies or substantial portions
17
 * next paragraph) shall be included in all copies or substantial portions
18
 * of the Software.
18
 * of the Software.
19
 *
19
 *
20
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
 *
27
 *
28
 */
28
 */
29
 
29
 
30
//#include 
30
//#include 
31
#include 
31
#include 
32
#include 
32
#include 
33
#include "i915_drv.h"
33
#include "i915_drv.h"
34
#include "i915_trace.h"
34
#include "i915_trace.h"
35
#include "intel_drv.h"
35
#include "intel_drv.h"
36
 
36
 
37
#include 
37
#include 
38
#include 
38
#include 
39
#include 
39
#include 
40
#include 
40
#include 
-
 
41
#include  
41
 
42
 
42
#include 
43
#include 
43
 
44
 
44
#include 
45
#include 
45
 
46
 
46
#define __read_mostly
47
#define __read_mostly
-
 
48
 
-
 
49
static struct drm_driver driver;
-
 
50
 
-
 
51
#define GEN_DEFAULT_PIPEOFFSETS \
-
 
52
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-
 
53
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
-
 
54
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-
 
55
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-
 
56
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
-
 
57
 
-
 
58
#define GEN_CHV_PIPEOFFSETS \
-
 
59
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-
 
60
			  CHV_PIPE_C_OFFSET }, \
-
 
61
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-
 
62
			   CHV_TRANSCODER_C_OFFSET, }, \
-
 
63
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
-
 
64
			     CHV_PALETTE_C_OFFSET }
-
 
65
 
-
 
66
#define CURSOR_OFFSETS \
-
 
67
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
-
 
68
 
-
 
69
#define IVB_CURSOR_OFFSETS \
-
 
70
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
47
 
71
 
48
int init_display_kms(struct drm_device *dev);
-
 
49
 
-
 
50
static int i915_modeset __read_mostly = 1;
-
 
51
module_param_named(modeset, i915_modeset, int, 0400);
-
 
52
MODULE_PARM_DESC(modeset,
-
 
53
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
-
 
54
		"1=on, -1=force vga console preference [default])");
-
 
55
 
-
 
56
unsigned int i915_fbpercrtc __always_unused = 0;
-
 
57
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-
 
58
 
-
 
59
int i915_panel_ignore_lid __read_mostly         =  1;
-
 
60
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
-
 
61
MODULE_PARM_DESC(panel_ignore_lid,
-
 
62
		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
-
 
63
		"-1=force lid closed, -2=force lid open)");
-
 
64
 
-
 
65
unsigned int i915_powersave __read_mostly = 1;
-
 
66
module_param_named(powersave, i915_powersave, int, 0600);
-
 
67
MODULE_PARM_DESC(powersave,
-
 
68
		"Enable powersavings, fbc, downclocking, etc. (default: true)");
-
 
69
 
-
 
70
int i915_semaphores __read_mostly = -1;
-
 
71
module_param_named(semaphores, i915_semaphores, int, 0400);
-
 
72
MODULE_PARM_DESC(semaphores,
-
 
73
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-
 
74
 
-
 
75
int i915_enable_rc6 __read_mostly = -1;
-
 
76
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
-
 
77
MODULE_PARM_DESC(i915_enable_rc6,
-
 
78
		"Enable power-saving render C-state 6. "
-
 
79
		"Different stages can be selected via bitmask values "
-
 
80
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
-
 
81
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
-
 
82
		"default: -1 (use per-chip default)");
-
 
83
 
-
 
84
int i915_enable_fbc __read_mostly = -1;
-
 
85
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
-
 
86
MODULE_PARM_DESC(i915_enable_fbc,
-
 
87
		"Enable frame buffer compression for power savings "
-
 
88
		"(default: -1 (use per-chip default))");
-
 
89
 
-
 
90
unsigned int i915_lvds_downclock  __read_mostly =  0;
-
 
91
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
-
 
92
MODULE_PARM_DESC(lvds_downclock,
-
 
93
		"Use panel (LVDS/eDP) downclocking for power savings "
-
 
94
		"(default: false)");
-
 
95
 
-
 
96
int i915_lvds_channel_mode __read_mostly;
-
 
97
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
-
 
98
MODULE_PARM_DESC(lvds_channel_mode,
-
 
99
		 "Specify LVDS channel mode "
-
 
100
		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
 
101
 
-
 
102
int i915_panel_use_ssc __read_mostly = -1;
-
 
103
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-
 
104
MODULE_PARM_DESC(lvds_use_ssc,
-
 
105
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
-
 
106
		"(default: auto from VBT)");
-
 
107
 
-
 
108
int i915_vbt_sdvo_panel_type __read_mostly      = -1;
-
 
109
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
-
 
110
MODULE_PARM_DESC(vbt_sdvo_panel_type,
-
 
111
		"Override/Ignore selection of SDVO panel mode in the VBT "
-
 
112
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
 
113
 
-
 
114
static bool i915_try_reset __read_mostly = true;
-
 
115
module_param_named(reset, i915_try_reset, bool, 0600);
-
 
116
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
-
 
117
 
-
 
118
bool i915_enable_hangcheck __read_mostly = false;
-
 
119
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
-
 
120
MODULE_PARM_DESC(enable_hangcheck,
-
 
121
		"Periodically check GPU activity for detecting hangs. "
-
 
122
		"WARNING: Disabling this can cause system wide hangs. "
-
 
123
		"(default: true)");
-
 
124
 
-
 
125
int i915_enable_ppgtt __read_mostly = -1;
-
 
126
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
-
 
127
MODULE_PARM_DESC(i915_enable_ppgtt,
-
 
128
		"Enable PPGTT (default: true)");
-
 
129
 
-
 
130
int i915_enable_psr __read_mostly = 0;
-
 
131
module_param_named(enable_psr, i915_enable_psr, int, 0600);
-
 
132
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
-
 
133
 
-
 
134
unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
-
 
135
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
-
 
136
MODULE_PARM_DESC(preliminary_hw_support,
-
 
137
		"Enable preliminary hardware support.");
-
 
138
 
-
 
139
int i915_disable_power_well __read_mostly = 1;
-
 
140
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
-
 
141
MODULE_PARM_DESC(disable_power_well,
-
 
142
		 "Disable the power well when possible (default: true)");
-
 
143
 
-
 
144
int i915_enable_ips __read_mostly = 1;
-
 
145
module_param_named(enable_ips, i915_enable_ips, int, 0600);
-
 
146
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
-
 
147
 
-
 
148
bool i915_fastboot __read_mostly = 0;
-
 
149
module_param_named(fastboot, i915_fastboot, bool, 0600);
-
 
150
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
-
 
151
		 "(default: false)");
-
 
152
 
-
 
153
int i915_enable_pc8 __read_mostly = 0;
-
 
154
module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
-
 
155
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
-
 
156
 
-
 
157
int i915_pc8_timeout __read_mostly = 5000;
-
 
158
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
-
 
159
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
-
 
160
 
-
 
161
bool i915_prefault_disable __read_mostly;
-
 
162
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
-
 
163
MODULE_PARM_DESC(prefault_disable,
-
 
164
		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
72
int init_display_kms(struct drm_device *dev);
165
 
73
 
166
static struct drm_driver driver;
74
 
167
extern int intel_agp_enabled;
75
extern int intel_agp_enabled;
168
 
76
 
169
#define PCI_VENDOR_ID_INTEL        0x8086
77
#define PCI_VENDOR_ID_INTEL        0x8086
170
 
78
 
171
 
79
 
172
static const struct intel_device_info intel_i915g_info = {
80
static const struct intel_device_info intel_i915g_info = {
173
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
81
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
174
	.has_overlay = 1, .overlay_needs_physical = 1,
82
	.has_overlay = 1, .overlay_needs_physical = 1,
175
	.ring_mask = RENDER_RING,
83
	.ring_mask = RENDER_RING,
-
 
84
	GEN_DEFAULT_PIPEOFFSETS,
-
 
85
	CURSOR_OFFSETS,
176
};
86
};
177
static const struct intel_device_info intel_i915gm_info = {
87
static const struct intel_device_info intel_i915gm_info = {
178
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
88
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
179
	.cursor_needs_physical = 1,
89
	.cursor_needs_physical = 1,
180
	.has_overlay = 1, .overlay_needs_physical = 1,
90
	.has_overlay = 1, .overlay_needs_physical = 1,
181
	.supports_tv = 1,
91
	.supports_tv = 1,
182
	.has_fbc = 1,
92
	.has_fbc = 1,
183
	.ring_mask = RENDER_RING,
93
	.ring_mask = RENDER_RING,
-
 
94
	GEN_DEFAULT_PIPEOFFSETS,
-
 
95
	CURSOR_OFFSETS,
184
};
96
};
185
static const struct intel_device_info intel_i945g_info = {
97
static const struct intel_device_info intel_i945g_info = {
186
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
98
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
187
	.has_overlay = 1, .overlay_needs_physical = 1,
99
	.has_overlay = 1, .overlay_needs_physical = 1,
188
	.ring_mask = RENDER_RING,
100
	.ring_mask = RENDER_RING,
-
 
101
	GEN_DEFAULT_PIPEOFFSETS,
-
 
102
	CURSOR_OFFSETS,
189
};
103
};
190
static const struct intel_device_info intel_i945gm_info = {
104
static const struct intel_device_info intel_i945gm_info = {
191
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
105
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
192
	.has_hotplug = 1, .cursor_needs_physical = 1,
106
	.has_hotplug = 1, .cursor_needs_physical = 1,
193
	.has_overlay = 1, .overlay_needs_physical = 1,
107
	.has_overlay = 1, .overlay_needs_physical = 1,
194
	.supports_tv = 1,
108
	.supports_tv = 1,
195
	.has_fbc = 1,
109
	.has_fbc = 1,
196
	.ring_mask = RENDER_RING,
110
	.ring_mask = RENDER_RING,
-
 
111
	GEN_DEFAULT_PIPEOFFSETS,
-
 
112
	CURSOR_OFFSETS,
197
};
113
};
198
 
114
 
199
static const struct intel_device_info intel_i965g_info = {
115
static const struct intel_device_info intel_i965g_info = {
200
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
116
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
201
	.has_hotplug = 1,
117
	.has_hotplug = 1,
202
	.has_overlay = 1,
118
	.has_overlay = 1,
203
	.ring_mask = RENDER_RING,
119
	.ring_mask = RENDER_RING,
-
 
120
	GEN_DEFAULT_PIPEOFFSETS,
-
 
121
	CURSOR_OFFSETS,
204
};
122
};
205
 
123
 
206
static const struct intel_device_info intel_i965gm_info = {
124
static const struct intel_device_info intel_i965gm_info = {
207
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
125
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
208
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
126
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
209
	.has_overlay = 1,
127
	.has_overlay = 1,
210
	.supports_tv = 1,
128
	.supports_tv = 1,
211
	.ring_mask = RENDER_RING,
129
	.ring_mask = RENDER_RING,
-
 
130
	GEN_DEFAULT_PIPEOFFSETS,
-
 
131
	CURSOR_OFFSETS,
212
};
132
};
213
 
133
 
214
static const struct intel_device_info intel_g33_info = {
134
static const struct intel_device_info intel_g33_info = {
215
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
135
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
216
	.need_gfx_hws = 1, .has_hotplug = 1,
136
	.need_gfx_hws = 1, .has_hotplug = 1,
217
	.has_overlay = 1,
137
	.has_overlay = 1,
218
	.ring_mask = RENDER_RING,
138
	.ring_mask = RENDER_RING,
-
 
139
	GEN_DEFAULT_PIPEOFFSETS,
-
 
140
	CURSOR_OFFSETS,
219
};
141
};
220
 
142
 
221
static const struct intel_device_info intel_g45_info = {
143
static const struct intel_device_info intel_g45_info = {
222
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
144
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
223
	.has_pipe_cxsr = 1, .has_hotplug = 1,
145
	.has_pipe_cxsr = 1, .has_hotplug = 1,
224
	.ring_mask = RENDER_RING | BSD_RING,
146
	.ring_mask = RENDER_RING | BSD_RING,
-
 
147
	GEN_DEFAULT_PIPEOFFSETS,
-
 
148
	CURSOR_OFFSETS,
225
};
149
};
226
 
150
 
227
static const struct intel_device_info intel_gm45_info = {
151
static const struct intel_device_info intel_gm45_info = {
228
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
152
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
229
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
153
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
230
	.has_pipe_cxsr = 1, .has_hotplug = 1,
154
	.has_pipe_cxsr = 1, .has_hotplug = 1,
231
	.supports_tv = 1,
155
	.supports_tv = 1,
232
	.ring_mask = RENDER_RING | BSD_RING,
156
	.ring_mask = RENDER_RING | BSD_RING,
-
 
157
	GEN_DEFAULT_PIPEOFFSETS,
-
 
158
	CURSOR_OFFSETS,
233
};
159
};
234
 
160
 
235
static const struct intel_device_info intel_pineview_info = {
161
static const struct intel_device_info intel_pineview_info = {
236
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
162
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
237
	.need_gfx_hws = 1, .has_hotplug = 1,
163
	.need_gfx_hws = 1, .has_hotplug = 1,
238
	.has_overlay = 1,
164
	.has_overlay = 1,
-
 
165
	GEN_DEFAULT_PIPEOFFSETS,
-
 
166
	CURSOR_OFFSETS,
239
};
167
};
240
 
168
 
241
static const struct intel_device_info intel_ironlake_d_info = {
169
static const struct intel_device_info intel_ironlake_d_info = {
242
	.gen = 5, .num_pipes = 2,
170
	.gen = 5, .num_pipes = 2,
243
	.need_gfx_hws = 1, .has_hotplug = 1,
171
	.need_gfx_hws = 1, .has_hotplug = 1,
244
	.ring_mask = RENDER_RING | BSD_RING,
172
	.ring_mask = RENDER_RING | BSD_RING,
-
 
173
	GEN_DEFAULT_PIPEOFFSETS,
-
 
174
	CURSOR_OFFSETS,
245
};
175
};
246
 
176
 
247
static const struct intel_device_info intel_ironlake_m_info = {
177
static const struct intel_device_info intel_ironlake_m_info = {
248
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
178
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
249
	.need_gfx_hws = 1, .has_hotplug = 1,
179
	.need_gfx_hws = 1, .has_hotplug = 1,
250
	.has_fbc = 1,
180
	.has_fbc = 1,
251
	.ring_mask = RENDER_RING | BSD_RING,
181
	.ring_mask = RENDER_RING | BSD_RING,
-
 
182
	GEN_DEFAULT_PIPEOFFSETS,
-
 
183
	CURSOR_OFFSETS,
252
};
184
};
253
 
185
 
254
static const struct intel_device_info intel_sandybridge_d_info = {
186
static const struct intel_device_info intel_sandybridge_d_info = {
255
	.gen = 6, .num_pipes = 2,
187
	.gen = 6, .num_pipes = 2,
256
	.need_gfx_hws = 1, .has_hotplug = 1,
188
	.need_gfx_hws = 1, .has_hotplug = 1,
257
	.has_fbc = 1,
189
	.has_fbc = 1,
258
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
190
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
259
	.has_llc = 1,
191
	.has_llc = 1,
-
 
192
	GEN_DEFAULT_PIPEOFFSETS,
-
 
193
	CURSOR_OFFSETS,
260
};
194
};
261
 
195
 
262
static const struct intel_device_info intel_sandybridge_m_info = {
196
static const struct intel_device_info intel_sandybridge_m_info = {
263
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
197
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
264
	.need_gfx_hws = 1, .has_hotplug = 1,
198
	.need_gfx_hws = 1, .has_hotplug = 1,
265
    .has_fbc      = 1,
199
    .has_fbc      = 1,
266
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
200
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
267
	.has_llc = 1,
201
	.has_llc = 1,
-
 
202
	GEN_DEFAULT_PIPEOFFSETS,
-
 
203
	CURSOR_OFFSETS,
268
};
204
};
269
 
205
 
270
#define GEN7_FEATURES  \
206
#define GEN7_FEATURES  \
271
	.gen = 7, .num_pipes = 3, \
207
	.gen = 7, .num_pipes = 3, \
272
	.need_gfx_hws = 1, .has_hotplug = 1, \
208
	.need_gfx_hws = 1, .has_hotplug = 1, \
273
	.has_fbc = 1, \
209
	.has_fbc = 1, \
274
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
210
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
275
	.has_llc = 1
211
	.has_llc = 1
276
 
212
 
277
static const struct intel_device_info intel_ivybridge_d_info = {
213
static const struct intel_device_info intel_ivybridge_d_info = {
278
	GEN7_FEATURES,
214
	GEN7_FEATURES,
279
	.is_ivybridge = 1,
215
	.is_ivybridge = 1,
-
 
216
	GEN_DEFAULT_PIPEOFFSETS,
-
 
217
	IVB_CURSOR_OFFSETS,
280
};
218
};
281
 
219
 
282
static const struct intel_device_info intel_ivybridge_m_info = {
220
static const struct intel_device_info intel_ivybridge_m_info = {
283
	GEN7_FEATURES,
221
	GEN7_FEATURES,
284
	.is_ivybridge = 1,
222
	.is_ivybridge = 1,
285
	.is_mobile = 1,
223
	.is_mobile = 1,
-
 
224
	GEN_DEFAULT_PIPEOFFSETS,
-
 
225
	IVB_CURSOR_OFFSETS,
286
};
226
};
287
 
227
 
288
static const struct intel_device_info intel_ivybridge_q_info = {
228
static const struct intel_device_info intel_ivybridge_q_info = {
289
	GEN7_FEATURES,
229
	GEN7_FEATURES,
290
	.is_ivybridge = 1,
230
	.is_ivybridge = 1,
291
	.num_pipes = 0, /* legal, last one wins */
231
	.num_pipes = 0, /* legal, last one wins */
-
 
232
	GEN_DEFAULT_PIPEOFFSETS,
-
 
233
	IVB_CURSOR_OFFSETS,
292
};
234
};
293
 
235
 
294
static const struct intel_device_info intel_valleyview_m_info = {
236
static const struct intel_device_info intel_valleyview_m_info = {
295
	GEN7_FEATURES,
237
	GEN7_FEATURES,
296
	.is_mobile = 1,
238
	.is_mobile = 1,
297
	.num_pipes = 2,
239
	.num_pipes = 2,
298
	.is_valleyview = 1,
240
	.is_valleyview = 1,
299
	.display_mmio_offset = VLV_DISPLAY_BASE,
241
	.display_mmio_offset = VLV_DISPLAY_BASE,
300
	.has_fbc = 0, /* legal, last one wins */
242
	.has_fbc = 0, /* legal, last one wins */
301
	.has_llc = 0, /* legal, last one wins */
243
	.has_llc = 0, /* legal, last one wins */
-
 
244
	GEN_DEFAULT_PIPEOFFSETS,
-
 
245
	CURSOR_OFFSETS,
302
};
246
};
303
 
247
 
304
static const struct intel_device_info intel_valleyview_d_info = {
248
static const struct intel_device_info intel_valleyview_d_info = {
305
	GEN7_FEATURES,
249
	GEN7_FEATURES,
306
	.num_pipes = 2,
250
	.num_pipes = 2,
307
	.is_valleyview = 1,
251
	.is_valleyview = 1,
308
	.display_mmio_offset = VLV_DISPLAY_BASE,
252
	.display_mmio_offset = VLV_DISPLAY_BASE,
309
	.has_fbc = 0, /* legal, last one wins */
253
	.has_fbc = 0, /* legal, last one wins */
310
	.has_llc = 0, /* legal, last one wins */
254
	.has_llc = 0, /* legal, last one wins */
-
 
255
	GEN_DEFAULT_PIPEOFFSETS,
-
 
256
	CURSOR_OFFSETS,
311
};
257
};
312
 
258
 
313
static const struct intel_device_info intel_haswell_d_info = {
259
static const struct intel_device_info intel_haswell_d_info = {
314
	GEN7_FEATURES,
260
	GEN7_FEATURES,
315
	.is_haswell = 1,
261
	.is_haswell = 1,
316
	.has_ddi = 1,
262
	.has_ddi = 1,
317
	.has_fpga_dbg = 1,
263
	.has_fpga_dbg = 1,
318
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
264
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-
 
265
	GEN_DEFAULT_PIPEOFFSETS,
-
 
266
	IVB_CURSOR_OFFSETS,
319
};
267
};
320
 
268
 
321
static const struct intel_device_info intel_haswell_m_info = {
269
static const struct intel_device_info intel_haswell_m_info = {
322
	GEN7_FEATURES,
270
	GEN7_FEATURES,
323
	.is_haswell = 1,
271
	.is_haswell = 1,
324
	.is_mobile = 1,
272
	.is_mobile = 1,
325
	.has_ddi = 1,
273
	.has_ddi = 1,
326
	.has_fpga_dbg = 1,
274
	.has_fpga_dbg = 1,
327
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
275
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-
 
276
	GEN_DEFAULT_PIPEOFFSETS,
-
 
277
	IVB_CURSOR_OFFSETS,
328
};
278
};
329
 
279
 
330
static const struct intel_device_info intel_broadwell_d_info = {
280
static const struct intel_device_info intel_broadwell_d_info = {
331
	.gen = 8, .num_pipes = 3,
281
	.gen = 8, .num_pipes = 3,
332
	.need_gfx_hws = 1, .has_hotplug = 1,
282
	.need_gfx_hws = 1, .has_hotplug = 1,
333
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
283
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
334
	.has_llc = 1,
284
	.has_llc = 1,
335
	.has_ddi = 1,
285
	.has_ddi = 1,
-
 
286
	.has_fpga_dbg = 1,
-
 
287
	.has_fbc = 1,
-
 
288
	GEN_DEFAULT_PIPEOFFSETS,
-
 
289
	IVB_CURSOR_OFFSETS,
336
};
290
};
337
 
291
 
338
static const struct intel_device_info intel_broadwell_m_info = {
292
static const struct intel_device_info intel_broadwell_m_info = {
339
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
293
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
340
	.need_gfx_hws = 1, .has_hotplug = 1,
294
	.need_gfx_hws = 1, .has_hotplug = 1,
341
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
295
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
342
	.has_llc = 1,
296
	.has_llc = 1,
343
	.has_ddi = 1,
297
	.has_ddi = 1,
-
 
298
	.has_fpga_dbg = 1,
-
 
299
	.has_fbc = 1,
-
 
300
	GEN_DEFAULT_PIPEOFFSETS,
-
 
301
	IVB_CURSOR_OFFSETS,
-
 
302
};
-
 
303
 
-
 
304
static const struct intel_device_info intel_broadwell_gt3d_info = {
-
 
305
	.gen = 8, .num_pipes = 3,
-
 
306
	.need_gfx_hws = 1, .has_hotplug = 1,
-
 
307
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-
 
308
	.has_llc = 1,
-
 
309
	.has_ddi = 1,
-
 
310
	.has_fpga_dbg = 1,
-
 
311
	.has_fbc = 1,
-
 
312
	GEN_DEFAULT_PIPEOFFSETS,
-
 
313
	IVB_CURSOR_OFFSETS,
-
 
314
};
-
 
315
 
-
 
316
static const struct intel_device_info intel_broadwell_gt3m_info = {
-
 
317
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
-
 
318
	.need_gfx_hws = 1, .has_hotplug = 1,
-
 
319
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-
 
320
	.has_llc = 1,
-
 
321
	.has_ddi = 1,
-
 
322
	.has_fpga_dbg = 1,
-
 
323
	.has_fbc = 1,
-
 
324
	GEN_DEFAULT_PIPEOFFSETS,
-
 
325
	IVB_CURSOR_OFFSETS,
-
 
326
};
-
 
327
 
-
 
328
static const struct intel_device_info intel_cherryview_info = {
-
 
329
	.is_preliminary = 1,
-
 
330
	.gen = 8, .num_pipes = 3,
-
 
331
	.need_gfx_hws = 1, .has_hotplug = 1,
-
 
332
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-
 
333
	.is_valleyview = 1,
-
 
334
	.display_mmio_offset = VLV_DISPLAY_BASE,
-
 
335
	GEN_CHV_PIPEOFFSETS,
-
 
336
	CURSOR_OFFSETS,
344
};
337
};
345
 
338
 
346
/*
339
/*
347
 * Make sure any device matches here are from most specific to most
340
 * Make sure any device matches here are from most specific to most
348
 * general.  For example, since the Quanta match is based on the subsystem
341
 * general.  For example, since the Quanta match is based on the subsystem
349
 * and subvendor IDs, we need it to come before the more general IVB
342
 * and subvendor IDs, we need it to come before the more general IVB
350
 * PCI ID matches, otherwise we'll use the wrong info struct above.
343
 * PCI ID matches, otherwise we'll use the wrong info struct above.
351
 */
344
 */
352
#define INTEL_PCI_IDS \
345
#define INTEL_PCI_IDS \
353
	INTEL_I915G_IDS(&intel_i915g_info),	\
346
	INTEL_I915G_IDS(&intel_i915g_info),	\
354
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
347
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
355
	INTEL_I945G_IDS(&intel_i945g_info),	\
348
	INTEL_I945G_IDS(&intel_i945g_info),	\
356
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
349
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
357
	INTEL_I965G_IDS(&intel_i965g_info),	\
350
	INTEL_I965G_IDS(&intel_i965g_info),	\
358
	INTEL_G33_IDS(&intel_g33_info),		\
351
	INTEL_G33_IDS(&intel_g33_info),		\
359
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
352
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
360
	INTEL_GM45_IDS(&intel_gm45_info), 	\
353
	INTEL_GM45_IDS(&intel_gm45_info), 	\
361
	INTEL_G45_IDS(&intel_g45_info), 	\
354
	INTEL_G45_IDS(&intel_g45_info), 	\
362
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
355
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
363
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
356
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
364
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
357
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
365
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
358
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
366
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
359
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
367
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
360
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
368
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
361
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
369
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
362
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
370
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
363
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
371
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
364
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
372
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
365
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
373
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
366
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
374
	INTEL_BDW_M_IDS(&intel_broadwell_m_info),	\
367
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
375
	INTEL_BDW_D_IDS(&intel_broadwell_d_info)
368
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
-
 
369
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
-
 
370
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
-
 
371
	INTEL_CHV_IDS(&intel_cherryview_info)
376
 
372
 
377
static const struct pci_device_id pciidlist[] = {       /* aka */
373
static const struct pci_device_id pciidlist[] = {       /* aka */
378
	INTEL_PCI_IDS,
374
	INTEL_PCI_IDS,
379
    {0, 0, 0}
375
    {0, 0, 0}
380
};
376
};
381
 
377
 
382
#define INTEL_PCH_DEVICE_ID_MASK        0xff00
378
#define INTEL_PCH_DEVICE_ID_MASK        0xff00
383
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
379
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
384
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
380
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
385
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
381
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
386
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
382
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
387
 
383
 
388
void intel_detect_pch(struct drm_device *dev)
384
void intel_detect_pch(struct drm_device *dev)
389
{
385
{
390
    struct drm_i915_private *dev_priv = dev->dev_private;
386
    struct drm_i915_private *dev_priv = dev->dev_private;
391
    struct pci_dev *pch;
387
	struct pci_dev *pch = NULL;
392
 
388
 
393
	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
389
	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
394
	 * (which really amounts to a PCH but no South Display).
390
	 * (which really amounts to a PCH but no South Display).
395
	 */
391
	 */
396
	if (INTEL_INFO(dev)->num_pipes == 0) {
392
	if (INTEL_INFO(dev)->num_pipes == 0) {
397
		dev_priv->pch_type = PCH_NOP;
393
		dev_priv->pch_type = PCH_NOP;
398
		return;
394
		return;
399
	}
395
	}
400
 
396
 
401
    /*
397
    /*
402
     * The reason to probe ISA bridge instead of Dev31:Fun0 is to
398
     * The reason to probe ISA bridge instead of Dev31:Fun0 is to
403
     * make graphics device passthrough work easy for VMM, that only
399
     * make graphics device passthrough work easy for VMM, that only
404
     * need to expose ISA bridge to let driver know the real hardware
400
     * need to expose ISA bridge to let driver know the real hardware
405
     * underneath. This is a requirement from virtualization team.
401
     * underneath. This is a requirement from virtualization team.
406
	 *
402
	 *
407
	 * In some virtualized environments (e.g. XEN), there is irrelevant
403
	 * In some virtualized environments (e.g. XEN), there is irrelevant
408
	 * ISA bridge in the system. To work reliably, we should scan trhough
404
	 * ISA bridge in the system. To work reliably, we should scan trhough
409
	 * all the ISA bridge devices and check for the first match, instead
405
	 * all the ISA bridge devices and check for the first match, instead
410
	 * of only checking the first one.
406
	 * of only checking the first one.
411
     */
407
     */
412
    pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
408
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
413
	while (pch) {
-
 
414
		struct pci_dev *curr = pch;
-
 
415
        if (pch->vendor == PCI_VENDOR_ID_INTEL) {
409
        if (pch->vendor == PCI_VENDOR_ID_INTEL) {
416
			unsigned short id;
-
 
417
            id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
410
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
418
			dev_priv->pch_id = id;
411
			dev_priv->pch_id = id;
419
 
412
 
420
            if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
413
            if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
421
                dev_priv->pch_type = PCH_IBX;
414
                dev_priv->pch_type = PCH_IBX;
422
                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
415
                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
423
				WARN_ON(!IS_GEN5(dev));
416
				WARN_ON(!IS_GEN5(dev));
424
            } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
417
            } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
425
                dev_priv->pch_type = PCH_CPT;
418
                dev_priv->pch_type = PCH_CPT;
426
                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
419
                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
427
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
420
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
428
            } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
421
            } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
429
                /* PantherPoint is CPT compatible */
422
                /* PantherPoint is CPT compatible */
430
                dev_priv->pch_type = PCH_CPT;
423
                dev_priv->pch_type = PCH_CPT;
431
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
424
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
432
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
425
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
433
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
426
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
434
				dev_priv->pch_type = PCH_LPT;
427
				dev_priv->pch_type = PCH_LPT;
435
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
428
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
436
				WARN_ON(!IS_HASWELL(dev));
429
				WARN_ON(!IS_HASWELL(dev));
437
				WARN_ON(IS_ULT(dev));
430
				WARN_ON(IS_ULT(dev));
438
			} else if (IS_BROADWELL(dev)) {
431
			} else if (IS_BROADWELL(dev)) {
439
				dev_priv->pch_type = PCH_LPT;
432
				dev_priv->pch_type = PCH_LPT;
440
				dev_priv->pch_id =
433
				dev_priv->pch_id =
441
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
434
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
442
				DRM_DEBUG_KMS("This is Broadwell, assuming "
435
				DRM_DEBUG_KMS("This is Broadwell, assuming "
443
					      "LynxPoint LP PCH\n");
436
					      "LynxPoint LP PCH\n");
444
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
437
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
445
				dev_priv->pch_type = PCH_LPT;
438
				dev_priv->pch_type = PCH_LPT;
446
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
439
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
447
				WARN_ON(!IS_HASWELL(dev));
440
				WARN_ON(!IS_HASWELL(dev));
448
				WARN_ON(!IS_ULT(dev));
441
				WARN_ON(!IS_ULT(dev));
449
			} else {
442
			} else
450
				goto check_next;
443
				continue;
451
            }
444
 
452
			break;
445
			break;
453
        }
446
        }
454
check_next:
-
 
455
		pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
-
 
456
//       pci_dev_put(curr);
-
 
457
    }
447
    }
458
	if (!pch)
448
	if (!pch)
459
		DRM_DEBUG_KMS("No PCH found?\n");
449
		DRM_DEBUG_KMS("No PCH found.\n");
-
 
450
 
-
 
451
//	pci_dev_put(pch);
460
}
452
}
461
 
453
 
462
bool i915_semaphore_is_enabled(struct drm_device *dev)
454
bool i915_semaphore_is_enabled(struct drm_device *dev)
463
{
455
{
464
	if (INTEL_INFO(dev)->gen < 6)
456
	if (INTEL_INFO(dev)->gen < 6)
465
		return false;
457
		return false;
-
 
458
 
-
 
459
	if (i915.semaphores >= 0)
-
 
460
		return i915.semaphores;
466
 
461
 
467
	/* Until we get further testing... */
462
	/* Until we get further testing... */
468
	if (IS_GEN8(dev)) {
-
 
469
		WARN_ON(!i915_preliminary_hw_support);
463
	if (IS_GEN8(dev))
470
		return false;
-
 
471
	}
-
 
472
 
-
 
473
	if (i915_semaphores >= 0)
-
 
474
		return i915_semaphores;
464
		return false;
475
 
465
 
476
#ifdef CONFIG_INTEL_IOMMU
466
#ifdef CONFIG_INTEL_IOMMU
477
	/* Enable semaphores on SNB when IO remapping is off */
467
	/* Enable semaphores on SNB when IO remapping is off */
478
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
468
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
479
		return false;
469
		return false;
480
#endif
470
#endif
481
 
471
 
482
	return true;
472
	return true;
483
}
473
}
484
 
474
 
485
#if 0
475
#if 0
-
 
476
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
-
 
477
{
-
 
478
	struct drm_device *dev = dev_priv->dev;
-
 
479
	struct drm_encoder *encoder;
-
 
480
 
-
 
481
	drm_modeset_lock_all(dev);
-
 
482
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
 
483
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
 
484
 
-
 
485
		if (intel_encoder->suspend)
-
 
486
			intel_encoder->suspend(intel_encoder);
-
 
487
	}
-
 
488
	drm_modeset_unlock_all(dev);
-
 
489
}
-
 
490
 
486
static int i915_drm_freeze(struct drm_device *dev)
491
static int i915_drm_freeze(struct drm_device *dev)
487
{
492
{
488
	struct drm_i915_private *dev_priv = dev->dev_private;
493
	struct drm_i915_private *dev_priv = dev->dev_private;
489
	struct drm_crtc *crtc;
494
	struct drm_crtc *crtc;
490
 
-
 
491
	intel_runtime_pm_get(dev_priv);
495
	pci_power_t opregion_target_state;
492
 
496
 
493
	/* ignore lid events during suspend */
497
	/* ignore lid events during suspend */
494
	mutex_lock(&dev_priv->modeset_restore_lock);
498
	mutex_lock(&dev_priv->modeset_restore_lock);
495
	dev_priv->modeset_restore = MODESET_SUSPENDED;
499
	dev_priv->modeset_restore = MODESET_SUSPENDED;
496
	mutex_unlock(&dev_priv->modeset_restore_lock);
500
	mutex_unlock(&dev_priv->modeset_restore_lock);
497
 
501
 
498
	/* We do a lot of poking in a lot of registers, make sure they work
502
	/* We do a lot of poking in a lot of registers, make sure they work
499
	 * properly. */
503
	 * properly. */
500
	hsw_disable_package_c8(dev_priv);
-
 
501
	intel_display_set_init_power(dev, true);
504
	intel_display_set_init_power(dev_priv, true);
502
 
505
 
503
	drm_kms_helper_poll_disable(dev);
506
	drm_kms_helper_poll_disable(dev);
504
 
507
 
505
	pci_save_state(dev->pdev);
508
	pci_save_state(dev->pdev);
506
 
509
 
507
	/* If KMS is active, we do the leavevt stuff here */
510
	/* If KMS is active, we do the leavevt stuff here */
508
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
511
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
509
		int error;
512
		int error;
510
 
513
 
511
		error = i915_gem_suspend(dev);
514
		error = i915_gem_suspend(dev);
512
		if (error) {
515
		if (error) {
513
			dev_err(&dev->pdev->dev,
516
			dev_err(&dev->pdev->dev,
514
				"GEM idle failed, resume might fail\n");
517
				"GEM idle failed, resume might fail\n");
515
			return error;
518
			return error;
516
		}
519
		}
517
 
-
 
518
		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
-
 
519
 
-
 
520
		drm_irq_uninstall(dev);
-
 
521
		dev_priv->enable_hotplug_processing = false;
520
 
522
		/*
521
		/*
523
		 * Disable CRTCs directly since we want to preserve sw state
522
		 * Disable CRTCs directly since we want to preserve sw state
524
		 * for _thaw.
523
		 * for _thaw. Also, power gate the CRTC power wells.
525
		 */
524
		 */
526
		mutex_lock(&dev->mode_config.mutex);
525
		drm_modeset_lock_all(dev);
527
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
526
		for_each_crtc(dev, crtc)
-
 
527
			intel_crtc_control(crtc, false);
-
 
528
		drm_modeset_unlock_all(dev);
-
 
529
 
-
 
530
		intel_dp_mst_suspend(dev);
-
 
531
 
-
 
532
		flush_delayed_work(&dev_priv->rps.delayed_resume_work);
528
			dev_priv->display.crtc_disable(crtc);
533
 
-
 
534
		intel_runtime_pm_disable_interrupts(dev);
-
 
535
		intel_suspend_encoders(dev_priv);
-
 
536
 
529
		mutex_unlock(&dev->mode_config.mutex);
537
		intel_suspend_gt_powersave(dev);
530
 
538
 
531
		intel_modeset_suspend_hw(dev);
539
		intel_modeset_suspend_hw(dev);
532
	}
540
	}
533
 
541
 
534
	i915_gem_suspend_gtt_mappings(dev);
542
	i915_gem_suspend_gtt_mappings(dev);
535
 
543
 
536
	i915_save_state(dev);
544
	i915_save_state(dev);
-
 
545
 
-
 
546
	opregion_target_state = PCI_D3cold;
-
 
547
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
-
 
548
	if (acpi_target_system_state() < ACPI_STATE_S3)
-
 
549
		opregion_target_state = PCI_D1;
-
 
550
#endif
-
 
551
	intel_opregion_notify_adapter(dev, opregion_target_state);
-
 
552
 
537
 
553
	intel_uncore_forcewake_reset(dev, false);
538
	intel_opregion_fini(dev);
554
	intel_opregion_fini(dev);
539
 
555
 
540
	console_lock();
556
	console_lock();
541
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
557
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
542
	console_unlock();
558
	console_unlock();
-
 
559
 
-
 
560
	dev_priv->suspend_count++;
-
 
561
 
-
 
562
	intel_display_set_init_power(dev_priv, false);
543
 
563
 
544
	return 0;
564
	return 0;
545
}
565
}
546
 
566
 
547
int i915_suspend(struct drm_device *dev, pm_message_t state)
567
int i915_suspend(struct drm_device *dev, pm_message_t state)
548
{
568
{
549
	int error;
569
	int error;
550
 
570
 
551
	if (!dev || !dev->dev_private) {
571
	if (!dev || !dev->dev_private) {
552
		DRM_ERROR("dev: %p\n", dev);
572
		DRM_ERROR("dev: %p\n", dev);
553
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
573
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
554
		return -ENODEV;
574
		return -ENODEV;
555
	}
575
	}
556
 
576
 
557
	if (state.event == PM_EVENT_PRETHAW)
577
	if (state.event == PM_EVENT_PRETHAW)
558
		return 0;
578
		return 0;
559
 
579
 
560
 
580
 
561
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
581
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
562
		return 0;
582
		return 0;
563
 
583
 
564
	error = i915_drm_freeze(dev);
584
	error = i915_drm_freeze(dev);
565
	if (error)
585
	if (error)
566
		return error;
586
		return error;
567
 
587
 
568
	if (state.event == PM_EVENT_SUSPEND) {
588
	if (state.event == PM_EVENT_SUSPEND) {
569
		/* Shut down the device */
589
		/* Shut down the device */
570
		pci_disable_device(dev->pdev);
590
		pci_disable_device(dev->pdev);
571
		pci_set_power_state(dev->pdev, PCI_D3hot);
591
		pci_set_power_state(dev->pdev, PCI_D3hot);
572
	}
592
	}
573
 
593
 
574
	return 0;
594
	return 0;
575
}
595
}
576
 
596
 
577
void intel_console_resume(struct work_struct *work)
597
void intel_console_resume(struct work_struct *work)
578
{
598
{
579
	struct drm_i915_private *dev_priv =
599
	struct drm_i915_private *dev_priv =
580
		container_of(work, struct drm_i915_private,
600
		container_of(work, struct drm_i915_private,
581
			     console_resume_work);
601
			     console_resume_work);
582
	struct drm_device *dev = dev_priv->dev;
602
	struct drm_device *dev = dev_priv->dev;
583
 
603
 
584
	console_lock();
604
	console_lock();
585
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
605
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
586
	console_unlock();
606
	console_unlock();
587
}
607
}
588
 
608
 
589
static void intel_resume_hotplug(struct drm_device *dev)
609
static int i915_drm_thaw_early(struct drm_device *dev)
590
{
610
{
591
	struct drm_mode_config *mode_config = &dev->mode_config;
-
 
592
	struct intel_encoder *encoder;
-
 
593
 
-
 
594
	mutex_lock(&mode_config->mutex);
-
 
595
	DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
 
596
 
611
	struct drm_i915_private *dev_priv = dev->dev_private;
597
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
612
 
-
 
613
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
598
		if (encoder->hot_plug)
614
		hsw_disable_pc8(dev_priv);
-
 
615
 
599
			encoder->hot_plug(encoder);
-
 
600
 
616
	intel_uncore_early_sanitize(dev, true);
601
	mutex_unlock(&mode_config->mutex);
617
	intel_uncore_sanitize(dev);
602
 
618
	intel_power_domains_init_hw(dev_priv);
603
	/* Just fire off a uevent and let userspace tell us what to do */
619
 
604
	drm_helper_hpd_irq_event(dev);
620
	return 0;
605
}
621
}
606
 
622
 
607
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
623
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
608
{
624
{
609
	struct drm_i915_private *dev_priv = dev->dev_private;
625
	struct drm_i915_private *dev_priv = dev->dev_private;
610
	int error = 0;
-
 
611
 
-
 
612
	intel_uncore_early_sanitize(dev);
-
 
613
 
-
 
614
	intel_uncore_sanitize(dev);
-
 
615
 
626
 
616
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
627
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
617
	    restore_gtt_mappings) {
628
	    restore_gtt_mappings) {
618
		mutex_lock(&dev->struct_mutex);
629
		mutex_lock(&dev->struct_mutex);
619
		i915_gem_restore_gtt_mappings(dev);
630
		i915_gem_restore_gtt_mappings(dev);
620
		mutex_unlock(&dev->struct_mutex);
631
		mutex_unlock(&dev->struct_mutex);
621
	}
632
	}
622
 
-
 
623
	intel_power_domains_init_hw(dev);
-
 
624
 
633
 
625
	i915_restore_state(dev);
634
	i915_restore_state(dev);
626
	intel_opregion_setup(dev);
635
	intel_opregion_setup(dev);
627
 
636
 
628
	/* KMS EnterVT equivalent */
637
	/* KMS EnterVT equivalent */
629
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
638
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
630
		intel_init_pch_refclk(dev);
639
		intel_init_pch_refclk(dev);
-
 
640
		drm_mode_config_reset(dev);
631
 
641
 
632
		mutex_lock(&dev->struct_mutex);
-
 
633
 
642
		mutex_lock(&dev->struct_mutex);
-
 
643
		if (i915_gem_init_hw(dev)) {
-
 
644
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-
 
645
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
634
		error = i915_gem_init_hw(dev);
646
		}
635
		mutex_unlock(&dev->struct_mutex);
-
 
636
 
647
		mutex_unlock(&dev->struct_mutex);
637
		/* We need working interrupts for modeset enabling ... */
648
 
638
		drm_irq_install(dev);
649
		intel_runtime_pm_restore_interrupts(dev);
639
 
650
 
640
		intel_modeset_init_hw(dev);
651
		intel_modeset_init_hw(dev);
-
 
652
 
-
 
653
		{
-
 
654
			unsigned long irqflags;
-
 
655
			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
656
			if (dev_priv->display.hpd_irq_setup)
-
 
657
				dev_priv->display.hpd_irq_setup(dev);
-
 
658
			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
 
659
		}
-
 
660
 
641
 
661
		intel_dp_mst_resume(dev);
642
		drm_modeset_lock_all(dev);
-
 
643
		drm_mode_config_reset(dev);
662
		drm_modeset_lock_all(dev);
644
		intel_modeset_setup_hw_state(dev, true);
663
		intel_modeset_setup_hw_state(dev, true);
645
		drm_modeset_unlock_all(dev);
664
		drm_modeset_unlock_all(dev);
646
 
665
 
647
		/*
666
		/*
648
		 * ... but also need to make sure that hotplug processing
667
		 * ... but also need to make sure that hotplug processing
649
		 * doesn't cause havoc. Like in the driver load code we don't
668
		 * doesn't cause havoc. Like in the driver load code we don't
650
		 * bother with the tiny race here where we might loose hotplug
669
		 * bother with the tiny race here where we might loose hotplug
651
		 * notifications.
670
		 * notifications.
652
		 * */
671
		 * */
653
		intel_hpd_init(dev);
672
		intel_hpd_init(dev);
654
		dev_priv->enable_hotplug_processing = true;
-
 
655
		/* Config may have changed between suspend and resume */
673
		/* Config may have changed between suspend and resume */
656
		intel_resume_hotplug(dev);
674
		drm_helper_hpd_irq_event(dev);
657
	}
675
	}
658
 
676
 
659
	intel_opregion_init(dev);
677
	intel_opregion_init(dev);
660
 
678
 
661
	/*
679
	/*
662
	 * The console lock can be pretty contented on resume due
680
	 * The console lock can be pretty contented on resume due
663
	 * to all the printk activity.  Try to keep it out of the hot
681
	 * to all the printk activity.  Try to keep it out of the hot
664
	 * path of resume if possible.
682
	 * path of resume if possible.
665
	 */
683
	 */
666
	if (console_trylock()) {
684
	if (console_trylock()) {
667
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
685
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
668
		console_unlock();
686
		console_unlock();
669
	} else {
687
	} else {
670
		schedule_work(&dev_priv->console_resume_work);
688
		schedule_work(&dev_priv->console_resume_work);
671
	}
689
	}
672
 
-
 
673
	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
-
 
674
	 * expected level. */
-
 
675
	hsw_enable_package_c8(dev_priv);
-
 
676
 
690
 
677
	mutex_lock(&dev_priv->modeset_restore_lock);
691
	mutex_lock(&dev_priv->modeset_restore_lock);
678
	dev_priv->modeset_restore = MODESET_DONE;
692
	dev_priv->modeset_restore = MODESET_DONE;
679
	mutex_unlock(&dev_priv->modeset_restore_lock);
693
	mutex_unlock(&dev_priv->modeset_restore_lock);
680
 
694
 
-
 
695
	intel_opregion_notify_adapter(dev, PCI_D0);
681
	intel_runtime_pm_put(dev_priv);
696
 
682
	return error;
697
	return 0;
683
}
698
}
684
 
699
 
685
static int i915_drm_thaw(struct drm_device *dev)
700
static int i915_drm_thaw(struct drm_device *dev)
686
{
701
{
687
	if (drm_core_check_feature(dev, DRIVER_MODESET))
702
	if (drm_core_check_feature(dev, DRIVER_MODESET))
688
		i915_check_and_clear_faults(dev);
703
		i915_check_and_clear_faults(dev);
689
 
704
 
690
	return __i915_drm_thaw(dev, true);
705
	return __i915_drm_thaw(dev, true);
691
}
706
}
692
 
707
 
693
int i915_resume(struct drm_device *dev)
708
static int i915_resume_early(struct drm_device *dev)
694
{
-
 
695
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
696
	int ret;
-
 
697
 
709
{
698
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
710
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
699
		return 0;
711
		return 0;
-
 
712
 
-
 
713
	/*
-
 
714
	 * We have a resume ordering issue with the snd-hda driver also
-
 
715
	 * requiring our device to be power up. Due to the lack of a
-
 
716
	 * parent/child relationship we currently solve this with an early
-
 
717
	 * resume hook.
-
 
718
	 *
-
 
719
	 * FIXME: This should be solved with a special hdmi sink device or
-
 
720
	 * similar so that power domains can be employed.
700
 
721
	 */
701
	if (pci_enable_device(dev->pdev))
722
	if (pci_enable_device(dev->pdev))
702
		return -EIO;
723
		return -EIO;
703
 
724
 
704
	pci_set_master(dev->pdev);
725
	pci_set_master(dev->pdev);
-
 
726
 
-
 
727
	return i915_drm_thaw_early(dev);
-
 
728
}
-
 
729
 
-
 
730
int i915_resume(struct drm_device *dev)
-
 
731
{
-
 
732
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
733
	int ret;
705
 
734
 
706
	/*
735
	/*
707
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
736
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
708
	 * earlier) need to restore the GTT mappings since the BIOS might clear
737
	 * earlier) need to restore the GTT mappings since the BIOS might clear
709
	 * all our scratch PTEs.
738
	 * all our scratch PTEs.
710
	 */
739
	 */
711
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
740
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
712
	if (ret)
741
	if (ret)
713
		return ret;
742
		return ret;
714
 
743
 
715
	drm_kms_helper_poll_enable(dev);
744
	drm_kms_helper_poll_enable(dev);
716
	return 0;
745
	return 0;
717
}
746
}
-
 
747
 
-
 
748
static int i915_resume_legacy(struct drm_device *dev)
-
 
749
{
-
 
750
	i915_resume_early(dev);
-
 
751
	i915_resume(dev);
-
 
752
 
-
 
753
	return 0;
-
 
754
}
718
 
755
 
719
/**
756
/**
720
 * i915_reset - reset chip after a hang
757
 * i915_reset - reset chip after a hang
721
 * @dev: drm device to reset
758
 * @dev: drm device to reset
722
 *
759
 *
723
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
760
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
724
 * reset or otherwise an error code.
761
 * reset or otherwise an error code.
725
 *
762
 *
726
 * Procedure is fairly simple:
763
 * Procedure is fairly simple:
727
 *   - reset the chip using the reset reg
764
 *   - reset the chip using the reset reg
728
 *   - re-init context state
765
 *   - re-init context state
729
 *   - re-init hardware status page
766
 *   - re-init hardware status page
730
 *   - re-init ring buffer
767
 *   - re-init ring buffer
731
 *   - re-init interrupt state
768
 *   - re-init interrupt state
732
 *   - re-init display
769
 *   - re-init display
733
 */
770
 */
734
int i915_reset(struct drm_device *dev)
771
int i915_reset(struct drm_device *dev)
735
{
772
{
736
	drm_i915_private_t *dev_priv = dev->dev_private;
773
	struct drm_i915_private *dev_priv = dev->dev_private;
737
	bool simulated;
774
	bool simulated;
738
	int ret;
775
	int ret;
739
 
776
 
740
	if (!i915_try_reset)
777
	if (!i915.reset)
741
		return 0;
778
		return 0;
742
 
779
 
743
	mutex_lock(&dev->struct_mutex);
780
	mutex_lock(&dev->struct_mutex);
744
 
781
 
745
	i915_gem_reset(dev);
782
	i915_gem_reset(dev);
746
 
783
 
747
	simulated = dev_priv->gpu_error.stop_rings != 0;
784
	simulated = dev_priv->gpu_error.stop_rings != 0;
748
 
785
 
749
		ret = intel_gpu_reset(dev);
786
		ret = intel_gpu_reset(dev);
750
 
787
 
751
		/* Also reset the gpu hangman. */
788
		/* Also reset the gpu hangman. */
752
		if (simulated) {
789
		if (simulated) {
753
			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
790
			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
754
			dev_priv->gpu_error.stop_rings = 0;
791
			dev_priv->gpu_error.stop_rings = 0;
755
			if (ret == -ENODEV) {
792
			if (ret == -ENODEV) {
756
			DRM_INFO("Reset not implemented, but ignoring "
793
			DRM_INFO("Reset not implemented, but ignoring "
757
					  "error for simulated gpu hangs\n");
794
					  "error for simulated gpu hangs\n");
758
				ret = 0;
795
				ret = 0;
759
			}
796
			}
760
	}
797
	}
761
 
798
 
762
	if (ret) {
799
	if (ret) {
763
		DRM_ERROR("Failed to reset chip: %i\n", ret);
800
		DRM_ERROR("Failed to reset chip: %i\n", ret);
764
		mutex_unlock(&dev->struct_mutex);
801
		mutex_unlock(&dev->struct_mutex);
765
		return ret;
802
		return ret;
766
	}
803
	}
767
 
804
 
768
	/* Ok, now get things going again... */
805
	/* Ok, now get things going again... */
769
 
806
 
770
	/*
807
	/*
771
	 * Everything depends on having the GTT running, so we need to start
808
	 * Everything depends on having the GTT running, so we need to start
772
	 * there.  Fortunately we don't need to do this unless we reset the
809
	 * there.  Fortunately we don't need to do this unless we reset the
773
	 * chip at a PCI level.
810
	 * chip at a PCI level.
774
	 *
811
	 *
775
	 * Next we need to restore the context, but we don't use those
812
	 * Next we need to restore the context, but we don't use those
776
	 * yet either...
813
	 * yet either...
777
	 *
814
	 *
778
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
815
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
779
	 * was running at the time of the reset (i.e. we weren't VT
816
	 * was running at the time of the reset (i.e. we weren't VT
780
	 * switched away).
817
	 * switched away).
781
	 */
818
	 */
782
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
819
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
783
			!dev_priv->ums.mm_suspended) {
820
			!dev_priv->ums.mm_suspended) {
784
		dev_priv->ums.mm_suspended = 0;
821
		dev_priv->ums.mm_suspended = 0;
785
 
822
 
786
		ret = i915_gem_init_hw(dev);
823
		ret = i915_gem_init_hw(dev);
787
		mutex_unlock(&dev->struct_mutex);
824
		mutex_unlock(&dev->struct_mutex);
788
		if (ret) {
825
		if (ret) {
789
			DRM_ERROR("Failed hw init on reset %d\n", ret);
826
			DRM_ERROR("Failed hw init on reset %d\n", ret);
790
			return ret;
827
			return ret;
791
		}
828
		}
-
 
829
 
-
 
830
		/*
-
 
831
		 * FIXME: This races pretty badly against concurrent holders of
-
 
832
		 * ring interrupts. This is possible since we've started to drop
-
 
833
		 * dev->struct_mutex in select places when waiting for the gpu.
-
 
834
		 */
-
 
835
 
-
 
836
		/*
-
 
837
		 * rps/rc6 re-init is necessary to restore state lost after the
-
 
838
		 * reset and the re-install of gt irqs. Skip for ironlake per
792
 
839
		 * previous concerns that it doesn't respond well to some forms
-
 
840
		 * of re-init after reset.
-
 
841
		 */
793
		drm_irq_uninstall(dev);
842
		if (INTEL_INFO(dev)->gen > 5)
-
 
843
			intel_reset_gt_powersave(dev);
794
		drm_irq_install(dev);
844
 
795
		intel_hpd_init(dev);
845
		intel_hpd_init(dev);
796
	} else {
846
	} else {
797
		mutex_unlock(&dev->struct_mutex);
847
		mutex_unlock(&dev->struct_mutex);
798
	}
848
	}
799
 
849
 
800
	return 0;
850
	return 0;
801
}
851
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * confusion, especially on systems where both functions have
	 * the same PCI ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}
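
/*
 * PCI_FUNC() returns the function number encoded in the low three bits
 * of devfn (slot = devfn >> 3, function = devfn & 0x7). For example, a
 * hypothetical IGD at 00:02.0 has devfn 0x10 and passes the check
 * above, while the phantom function at 00:02.1 (devfn 0x11) is
 * rejected.
 */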

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = drm_dev->dev_private;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
		hsw_enable_pc8(dev_priv);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
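
/*
 * Ordering sketch, assuming standard dev_pm_ops semantics: the PM core
 * runs every device's .suspend before any device's .suspend_late, so
 * snd-hda gets to suspend against a still powered-up GPU before
 * i915_pm_suspend_late() drops the device into D3hot.
 */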

static int i915_pm_resume_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw_early(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}
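
/*
 * The freeze/thaw/poweroff trio above follows the hibernation sequence:
 * .freeze quiesces the device before the hibernation image is written,
 * .thaw revives it afterwards (or on error), and .poweroff is the last
 * stop before the platform powers down; .restore/.restore_early then
 * take the resume role when the image is loaded back.
 */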

static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int snb_runtime_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_init_pch_refclk(dev);

	return 0;
}

static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
	hsw_disable_pc8(dev_priv);

	return 0;
}
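
/*
 * Runtime PM on HSW/BDW boils down to PC8 handling: suspend enters
 * package C8 via hsw_enable_pc8() and resume leaves it again with
 * hsw_disable_pc8(), while SNB only needs its PCH reference clock
 * reprogrammed on the way back up.
 */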

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that, based on the above 3 criteria, can be
 * safely ignored, we save/restore all others, practically treating the HW
 * context as a black-box for the driver. Further investigation is needed to
 * reduce the saved/restored registers even further, by following the same
 * 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit; they are not
	 * restored here, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}
#endif

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
	/* Wait for a previous force-off to settle */
	if (force_on) {
		err = wait_for(!COND, 20);
		if (err) {
			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
			return err;
		}
	}

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}
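
/*
 * Typical usage, sketched after vlv_runtime_suspend() below: force the
 * gfx clock on around Gunit register access while the GT may be power
 * gated, then release the force again:
 *
 *	err = vlv_force_gfx_clock(dev_priv, true);
 *	if (err)
 *		return err;
 *	vlv_save_gunit_s0ix_state(dev_priv);
 *	err = vlv_force_gfx_clock(dev_priv, false);
 */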
#if 0
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}
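
/*
 * The COND/wait_for() pattern used here and below: wait_for(COND, ms)
 * polls COND until it becomes true or the ms timeout expires, returning
 * -ETIMEDOUT on timeout, which is what the "timeout disabling GT
 * waking" error above reports.
 */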

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
			wait_for_on ? "on" : "off",
			I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_ERROR("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;
	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
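
/*
 * The error unwinding above runs in reverse order of setup: if a step
 * fails after GT waking was disabled, err2 first re-enables waking and
 * only then err1 drops the forced gfx clock, so the GT is never left
 * unwakeable with a stale clock force.
 */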

static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	intel_init_clock_gating(dev);
	i915_gem_restore_fences(dev);

	return ret;
}

static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	WARN_ON(!HAS_RUNTIME_PM(dev));
	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps.work can't be rearmed here, since we get here only after making
	 * sure the GPU is idle and the RPS freq is set to the minimum. See
	 * intel_mark_idle().
	 */
	cancel_work_sync(&dev_priv->rps.work);
	intel_runtime_pm_disable_interrupts(dev);

	if (IS_GEN6(dev)) {
		ret = 0;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_suspend(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_suspend(dev_priv);
	} else {
		ret = -ENODEV;
		WARN_ON(1);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_restore_interrupts(dev);

		return ret;
	}

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
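
/*
 * Per-platform dispatch shared by both runtime hooks: GEN6 (SNB) needs
 * no extra suspend work, HSW/BDW go through the PC8 helpers, and
 * Valleyview takes the s0ix save/restore path; anything else trips the
 * WARN_ON and fails with -ENODEV.
 */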

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	if (IS_GEN6(dev)) {
		ret = snb_runtime_resume(dev_priv);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_resume(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_resume(dev_priv);
	} else {
		WARN_ON(1);
		ret = -ENODEV;
	}

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
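
/*
 * Note the split in i915_pm_ops: system sleep (suspend/resume and the
 * hibernation freeze/thaw/poweroff/restore chain) always funnels into
 * i915_drm_freeze()/i915_resume(), while only the runtime_suspend/
 * runtime_resume pair reaches the per-platform intel_runtime_* paths
 * above.
 */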

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
#endif

static struct drm_driver driver = {
    /* Don't use MTRRs here; the Xserver or userspace app should
     * deal with them for Intel hardware.
     */
    .driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
    .load = i915_driver_load,
//    .unload = i915_driver_unload,
    .open = i915_driver_open,
//    .lastclose = i915_driver_lastclose,
//    .preclose = i915_driver_preclose,
//    .postclose = i915_driver_postclose,

    /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
//    .suspend = i915_suspend,
//    .resume = i915_resume,

//    .device_is_agp = i915_driver_device_is_agp,
//    .master_create = i915_master_create,
//    .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
    .debugfs_init = i915_debugfs_init,
    .debugfs_cleanup = i915_debugfs_cleanup,
#endif
    .gem_free_object = i915_gem_free_object,

//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
//    .gem_prime_export = i915_gem_prime_export,
//    .gem_prime_import = i915_gem_prime_import,

//    .dumb_create = i915_gem_dumb_create,
//    .dumb_map_offset = i915_gem_mmap_gtt,
//    .dumb_destroy = i915_gem_dumb_destroy,
//    .ioctls = i915_ioctls,
//    .fops = &i915_driver_fops,
//    .name = DRIVER_NAME,
//    .desc = DRIVER_DESC,
//    .date = DRIVER_DATE,
//    .major = DRIVER_MAJOR,
//    .minor = DRIVER_MINOR,
//    .patchlevel = DRIVER_PATCHLEVEL,
};
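
/*
 * In this port most stock DRM entry points (unload, ioctls, fops, dumb
 * buffers, PRIME) are stubbed out above; i915_init() below probes the
 * PCI device directly and forces DRIVER_MODESET, while i915_pci_probe()
 * clears DRIVER_USE_AGP again at bind time.
 */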

int i915_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id  *ent;
    int  err;

    ent = find_pci_device(&device, pciidlist);
    if (unlikely(ent == NULL)) {
        dbgprintf("device not found\n");
        return -ENODEV;
    }

    drm_core_init();

    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    driver.driver_features |= DRIVER_MODESET;

    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);

    return err;
}