/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

//#include 
#include 
#include 
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include 
#include 
#include 
#include 

#include 

#include 
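
/*
 * __read_mostly is defined away to a no-op for this build, so the
 * annotations on the module parameters below have no effect here.
 */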
#define __read_mostly

int init_display_kms(struct drm_device *dev);

static int i915_modeset __read_mostly = 1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
		"1=on, -1=force vga console preference [default])");

unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);

int i915_panel_ignore_lid __read_mostly         =  1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
		"-1=force lid closed, -2=force lid open)");

unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
		"Enable powersavings, fbc, downclocking, etc. (default: true)");

int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
		"Enable power-saving render C-state 6. "
		"Different stages can be selected via bitmask values "
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
		"default: -1 (use per-chip default)");

int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
		"Enable frame buffer compression for power savings "
		"(default: -1 (use per-chip default))");

unsigned int i915_lvds_downclock  __read_mostly =  0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
		"Use panel (LVDS/eDP) downclocking for power savings "
		"(default: false)");

int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
		 "Specify LVDS channel mode "
		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");

int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
		"(default: auto from VBT)");

int i915_vbt_sdvo_panel_type __read_mostly      = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
		"Override/Ignore selection of SDVO panel mode in the VBT "
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");

static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");

bool i915_enable_hangcheck __read_mostly = false;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
		"Periodically check GPU activity for detecting hangs. "
		"WARNING: Disabling this can cause system wide hangs. "
		"(default: true)");

int i915_enable_ppgtt __read_mostly = 0;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
		"Enable PPGTT (default: true)");

int i915_enable_psr __read_mostly = 0;
module_param_named(enable_psr, i915_enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");

unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
		"Enable preliminary hardware support.");

int i915_disable_power_well __read_mostly = 1;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
		 "Disable the power well when possible (default: true)");

int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");

bool i915_fastboot __read_mostly = 0;
module_param_named(fastboot, i915_fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
		 "(default: false)");

int i915_enable_pc8 __read_mostly = 1;
module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");

int i915_pc8_timeout __read_mostly = 5000;
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");

bool i915_prefault_disable __read_mostly;
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");

static struct drm_driver driver;
extern int intel_agp_enabled;

#define PCI_VENDOR_ID_INTEL        0x8086

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc      = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};
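
/*
 * Feature flags shared by every gen7 device info entry below (Ivy Bridge,
 * Valley View, Haswell).  Individual entries may still override a field
 * after expanding the macro: with designated initializers the last
 * assignment to a member wins, as the "legal, last one wins" notes say.
 */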
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_bsd_ring = 1, \
	.has_blt_ring = 1, \
	.has_llc = 1, \
	.has_force_wake = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	.has_fbc = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_vebox_ring = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	.has_vebox_ring = 1,
};

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info),	\
	INTEL_G45_IDS(&intel_g45_info),	\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info)

static const struct pci_device_id pciidlist[] = {       /* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

#define INTEL_PCH_DEVICE_ID_MASK        0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE    0x8c00
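
/*
 * Identify the Platform Controller Hub (PCH): walk the ISA bridge devices
 * on the PCI bus and, for the first Intel bridge found, map its device ID
 * onto a PCH type (Ibex Peak, CougarPoint/PantherPoint, LynxPoint).  The
 * result is stored in dev_priv->pch_type and dev_priv->pch_id.
 */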
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	while (pch) {
		struct pci_dev *curr = pch;
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id;
			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else {
				goto check_next;
			}
			break;
		}
check_next:
		pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
//		pci_dev_put(curr);
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found?\n");
}
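
/*
 * Decide whether inter-ring semaphores are used: never before gen6, honour
 * an explicit "semaphores" module parameter if one was set, and otherwise
 * use the per-chip default (keeping semaphores off on gen6 when IOMMU
 * graphics remapping is enabled).
 */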
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	if (i915_semaphores >= 0)
		return i915_semaphores;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return 1;
}
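
/*
 * The suspend/resume, GPU reset and PCI/PM callbacks below are compiled out
 * for this port (#if 0 ... #endif) and are not used at run time.
 */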
#if 0
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	hsw_disable_package_c8(dev_priv);
	intel_set_power_well(dev, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		mutex_lock(&dev->struct_mutex);
		error = i915_gem_idle(dev);
		mutex_unlock(&dev->struct_mutex);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);

		intel_modeset_suspend_hw(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();

	return 0;
}

int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
}

static void intel_resume_hotplug(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
	 * expected level. */
	hsw_enable_package_c8(dev_priv);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);
	return error;
}

static int i915_drm_thaw(struct drm_device *dev)
{
	int error = 0;

	intel_uncore_sanitize(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	__i915_drm_thaw(dev);

	return error;
}

int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	intel_uncore_sanitize(dev);

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need this since the BIOS might clear all our scratch PTEs.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    !dev_priv->opregion.header) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	ret = __i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	if (!i915_try_reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
		ret = -ENODEV;
	} else {
		ret = intel_gpu_reset(dev);

		/* Also reset the gpu hangman. */
		if (simulated) {
			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
			dev_priv->gpu_error.stop_rings = 0;
			if (ret == -ENODEV) {
				DRM_ERROR("Reset not implemented, but ignoring "
					  "error for simulated gpu hangs\n");
				ret = 0;
			}
		} else
			dev_priv->gpu_error.last_reset = get_seconds();
	}
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->ums.mm_suspended = 0;

		i915_gem_init_swizzling(dev);

		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		if (dev_priv->mm.aliasing_ppgtt) {
			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
			if (ret)
				i915_gem_cleanup_aliasing_ppgtt(dev);
		}

		/*
		 * It would make sense to re-init all the other hw state, at
		 * least the rps/rc6/emon init done within modeset_init_hw. For
		 * some unknown reason, this blows up my ilk, so don't.
		 */

		mutex_unlock(&dev->struct_mutex);

		drm_irq_uninstall(dev);
		drm_irq_install(dev);
		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
	 * implementation for gen3 (and only gen3) that used legacy drm maps
	 * (gasp!) to share buffers between X and the client. Hence we need to
	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
	if (intel_info->gen != 3) {
		driver.driver_features &=
			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
	} else if (!intel_agp_enabled) {
		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
		return -ENODEV;
	}

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int error;

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(drm_dev);
	if (error)
		return error;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}

#endif
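
/*
 * DRM driver description for this port.  Only a minimal set of callbacks is
 * wired up (load, open, the GEM object hooks and, when CONFIG_DEBUG_FS is
 * set, the debugfs hooks); the remaining upstream callbacks are left
 * commented out.
 */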
static struct drm_driver driver = {
    /* Don't use MTRRs here; the Xserver or userspace app should
     * deal with them for Intel hardware.
     */
    .driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
    .load = i915_driver_load,
//    .unload = i915_driver_unload,
    .open = i915_driver_open,
//    .lastclose = i915_driver_lastclose,
//    .preclose = i915_driver_preclose,
//    .postclose = i915_driver_postclose,

    /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
//    .suspend = i915_suspend,
//    .resume = i915_resume,

//    .device_is_agp = i915_driver_device_is_agp,
//    .master_create = i915_master_create,
//    .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
    .gem_init_object = i915_gem_init_object,
    .gem_free_object = i915_gem_free_object,

//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
//    .gem_prime_export = i915_gem_prime_export,
//    .gem_prime_import = i915_gem_prime_import,

//    .dumb_create = i915_gem_dumb_create,
//    .dumb_map_offset = i915_gem_mmap_gtt,
//    .dumb_destroy = i915_gem_dumb_destroy,
//    .ioctls = i915_ioctls,
//    .fops = &i915_driver_fops,
//    .name = DRIVER_NAME,
//    .desc = DRIVER_DESC,
//    .date = DRIVER_DATE,
//    .major = DRIVER_MAJOR,
//    .minor = DRIVER_MINOR,
//    .patchlevel = DRIVER_PATCHLEVEL,
};
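
/*
 * Driver initialization for this port: locate a supported device in
 * pciidlist, bring up the DRM core, and hand the device to
 * drm_get_pci_dev() together with the driver description above.
 */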
int i915_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id *ent;
    int err;

    ent = find_pci_device(&device, pciidlist);
    if (unlikely(ent == NULL)) {
        dbgprintf("device not found\n");
        return -ENODEV;
    }

    drm_core_init();

    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
                               device.pci_dev.device);
/*
    if (intel_info->gen != 3) {

    } else if (init_agp() != 0) {
        DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
        return -ENODEV;
    }
*/
    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);

    return err;
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
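
/*
 * Heuristic for Valley View register access: offsets that already include
 * VLV_DISPLAY_BASE, or that belong to the ring, fence, interrupt and
 * power-management blocks, are not treated as display registers; everything
 * else is.
 */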
static bool IS_DISPLAYREG(u32 reg)
{
	/*
	 * This should make it easier to transition modules over to the
	 * new register block scheme, since we can do it incrementally.
	 */
	if (reg >= VLV_DISPLAY_BASE)
		return false;

	if (reg >= RENDER_RING_BASE &&
	    reg < RENDER_RING_BASE + 0xff)
		return false;
	if (reg >= GEN6_BSD_RING_BASE &&
	    reg < GEN6_BSD_RING_BASE + 0xff)
		return false;
	if (reg >= BLT_RING_BASE &&
	    reg < BLT_RING_BASE + 0xff)
		return false;

	if (reg == PGTBL_ER)
		return false;

	if (reg >= IPEIR_I965 &&
	    reg < HWSTAM)
		return false;

	if (reg == MI_MODE)
		return false;

	if (reg == GFX_MODE_GEN7)
		return false;

	if (reg == RENDER_HWS_PGA_GEN7 ||
	    reg == BSD_HWS_PGA_GEN7 ||
	    reg == BLT_HWS_PGA_GEN7)
		return false;

	if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
	    reg == GEN6_BSD_RNCID)
		return false;

	if (reg == GEN6_BLITTER_ECOSKPD)
		return false;

	if (reg >= 0x4000c &&
	    reg <= 0x4002c)
		return false;

	if (reg >= 0x4f000 &&
	    reg <= 0x4f08f)
		return false;

	if (reg >= 0x4f100 &&
	    reg <= 0x4f11f)
		return false;

	if (reg >= VLV_MASTER_IER &&
	    reg <= GEN6_PMIER)
		return false;

	if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
	    reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
		return false;

	if (reg >= VLV_IIR_RW &&
	    reg <= VLV_ISR)
		return false;

	if (reg == FORCEWAKE_VLV ||
	    reg == FORCEWAKE_ACK_VLV)
		return false;

	if (reg == GEN6_GDRST)
		return false;

	switch (reg) {
	case _3D_CHICKEN3:
	case IVB_CHICKEN3:
	case GEN7_COMMON_SLICE_CHICKEN1:
	case GEN7_L3CNTLREG1:
	case GEN7_L3_CHICKEN_MODE_REGISTER:
	case GEN7_ROW_CHICKEN2:
	case GEN7_L3SQCREG4:
	case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
	case GEN7_HALF_SLICE_CHICKEN1:
	case GEN6_MBCTL:
	case GEN6_UCGCTL2:
		return false;
	default:
		break;
	}

	return true;
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	I915_WRITE_NOTRACE(MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
1101