Subversion Repositories — KolibriOS

Diff of the i915 DRM driver header (i915_drv.h) between Rev 4560 and Rev 5060.
@@ -33,12 +33,13 @@
 #include 
 
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
-#include 
+#include "i915_gem_gtt.h"
 //#include 
 #include 
 #include 
 #include 
 //#include 
+#include 
 
@@ -61,30 +62,32 @@
 
 #define DRIVER_AUTHOR		"Tungsten Graphics, Inc."
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20080730"
+#define DRIVER_DATE		"20140725"
 
 enum pipe {
 	INVALID_PIPE = -1,
 	PIPE_A = 0,
 	PIPE_B,
 	PIPE_C,
-	I915_MAX_PIPES
+	_PIPE_EDP,
+	I915_MAX_PIPES = _PIPE_EDP
 };
 #define pipe_name(p) ((p) + 'A')
 
 enum transcoder {
 	TRANSCODER_A = 0,
 	TRANSCODER_B,
 	TRANSCODER_C,
-	TRANSCODER_EDP = 0xF,
+	TRANSCODER_EDP,
+	I915_MAX_TRANSCODERS
 };
 #define transcoder_name(t) ((t) + 'A')
 
 enum plane {
 	PLANE_A = 0,
 	PLANE_B,
 	PLANE_C,
 };
 #define plane_name(p) ((p) + 'A')
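
Note: the pipe_name()/transcoder_name()/plane_name() macros in the hunk above rely on the enumerators starting at zero, so adding 'A' yields the letter used in log messages. A minimal standalone sketch (enum copied from the rev 5060 side; the main() harness is illustrative only):

	#include <stdio.h>

	enum pipe { INVALID_PIPE = -1, PIPE_A = 0, PIPE_B, PIPE_C,
		    _PIPE_EDP, I915_MAX_PIPES = _PIPE_EDP };
	#define pipe_name(p) ((p) + 'A')

	int main(void)
	{
		/* prints "pipe A", "pipe B", "pipe C" */
		for (int p = PIPE_A; p < I915_MAX_PIPES; p++)
			printf("pipe %c\n", pipe_name(p));
		return 0;
	}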
@@ -99,9 +102,9 @@
 	PORT_E,
 	I915_MAX_PORTS
 };
 #define port_name(p) ((p) + 'A')
 
-#define I915_NUM_PHYS_VLV 1
+#define I915_NUM_PHYS_VLV 2
 
 enum dpio_channel {
 	DPIO_CH0,
@@ -122,31 +125,33 @@
 	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
 	POWER_DOMAIN_TRANSCODER_A,
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_PORT_DDI_A_2_LANES,
+	POWER_DOMAIN_PORT_DDI_A_4_LANES,
+	POWER_DOMAIN_PORT_DDI_B_2_LANES,
+	POWER_DOMAIN_PORT_DDI_B_4_LANES,
+	POWER_DOMAIN_PORT_DDI_C_2_LANES,
+	POWER_DOMAIN_PORT_DDI_C_4_LANES,
+	POWER_DOMAIN_PORT_DDI_D_2_LANES,
+	POWER_DOMAIN_PORT_DDI_D_4_LANES,
+	POWER_DOMAIN_PORT_DSI,
+	POWER_DOMAIN_PORT_CRT,
+	POWER_DOMAIN_PORT_OTHER,
 	POWER_DOMAIN_VGA,
 	POWER_DOMAIN_AUDIO,
+	POWER_DOMAIN_PLLS,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
 };
 
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
 		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
 #define POWER_DOMAIN_TRANSCODER(tran) \
 	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
 	 (tran) + POWER_DOMAIN_TRANSCODER_A)
-
-#define HSW_ALWAYS_ON_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PIPE_A) |		\
-	BIT(POWER_DOMAIN_TRANSCODER_EDP))
-#define BDW_ALWAYS_ON_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PIPE_A) |		\
-	BIT(POWER_DOMAIN_TRANSCODER_EDP) |	\
-	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
 
 enum hpd_pin {
 	HPD_NONE = 0,
@@ -167,34 +172,55 @@
 	 I915_GEM_DOMAIN_COMMAND | \
 	 I915_GEM_DOMAIN_INSTRUCTION | \
 	 I915_GEM_DOMAIN_VERTEX)
 
 #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
+
+#define for_each_crtc(dev, crtc) \
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
 		if ((intel_encoder)->base.crtc == (__crtc))
 
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+		if ((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_power_domain(domain, mask)				\
+	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
+		if ((1 << (domain)) & (mask))
+
 struct drm_i915_private;
+struct i915_mmu_object;
 
 enum intel_dpll_id {
 	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
 	/* real shared dpll ids must be >= 0 */
-	DPLL_ID_PCH_PLL_A,
-	DPLL_ID_PCH_PLL_B,
+	DPLL_ID_PCH_PLL_A = 0,
+	DPLL_ID_PCH_PLL_B = 1,
+	DPLL_ID_WRPLL1 = 0,
+	DPLL_ID_WRPLL2 = 1,
 };
 #define I915_NUM_PLLS 2
 
 struct intel_dpll_hw_state {
 	uint32_t dpll;
 	uint32_t dpll_md;
 	uint32_t fp0;
 	uint32_t fp1;
+	uint32_t wrpll;
 };
 
 struct intel_shared_dpll {
 	int refcount; /* count of number of CRTCs sharing this PLL */
 	int active; /* count of number of active CRTCs (i.e. DPMS on) */
 	bool on; /* is the PLL actually active? Disabled during modeset */
 	const char *name;
 	/* should match the index in the dev_priv->shared_dplls array */
 	enum intel_dpll_id id;
+	struct intel_dpll_hw_state hw_state;
+	/* The mode_set hook is optional and should be used together with the
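
Note: the iterator macros added on the rev 5060 side are thin wrappers over loops; for_each_power_domain() in particular visits only the domain indices whose bit is set in the mask. A standalone sketch of the same pattern (domain list abridged for illustration):

	#include <stdio.h>

	enum { POWER_DOMAIN_PIPE_A, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_VGA,
	       POWER_DOMAIN_NUM };

	#define for_each_power_domain(domain, mask)				\
		for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
			if ((1 << (domain)) & (mask))

	int main(void)
	{
		int domain;
		unsigned long mask = (1 << POWER_DOMAIN_PIPE_A) |
				     (1 << POWER_DOMAIN_VGA);

		/* visits 0 (PIPE_A) and 2 (VGA), skips 1 (PIPE_B) */
		for_each_power_domain(domain, mask)
			printf("domain %d\n", domain);
		return 0;
	}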
@@ -221,14 +247,8 @@
 
 void intel_link_compute_m_n(int bpp, int nlanes,
 			    int pixel_clock, int link_clock,
 			    struct intel_link_m_n *m_n);
-
-struct intel_ddi_plls {
-	int spll_refcount;
-	int wrpll1_refcount;
-	int wrpll2_refcount;
-};
 
 /* Interface history:
  *
  * 1.1: Original.
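
Note: intel_link_compute_m_n(), declared in the hunk above, derives the DisplayPort data M/N pair, which expresses the ratio of the pixel data rate to the available link bandwidth: data_M/data_N ≈ (bpp × pixel_clock) / (8 × nlanes × link_clock). A hedged standalone sketch of just that arithmetic (the struct name is an illustrative stand-in, and the driver's reduction of the ratio to the hardware's register width is not shown):

	#include <stdint.h>

	struct example_m_n {	/* illustrative stand-in for intel_link_m_n */
		uint32_t data_m, data_n;
	};

	static void example_compute_m_n(int bpp, int nlanes,
					int pixel_clock, int link_clock,
					struct example_m_n *m_n)
	{
		/* bits of pixel data per second vs. total link bits per second */
		m_n->data_m = (uint32_t)bpp * (uint32_t)pixel_clock;
		m_n->data_n = 8u * (uint32_t)nlanes * (uint32_t)link_clock;
	}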
Line 244... Line 264...
244
#define DRIVER_PATCHLEVEL	0
264
#define DRIVER_PATCHLEVEL	0
Line 245... Line 265...
245
 
265
 
246
#define WATCH_LISTS	0
266
#define WATCH_LISTS	0
Line 247... Line -...
247
#define WATCH_GTT	0
-
 
248
 
-
 
249
#define I915_GEM_PHYS_CURSOR_0 1
-
 
250
#define I915_GEM_PHYS_CURSOR_1 2
-
 
251
#define I915_GEM_PHYS_OVERLAY_REGS 3
-
 
252
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
 
253
 
-
 
254
struct drm_i915_gem_phys_object {
-
 
255
	int id;
-
 
256
	struct page **page_list;
-
 
257
	drm_dma_handle_t *handle;
-
 
258
	struct drm_i915_gem_object *cur_obj;
-
 
259
};
267
#define WATCH_GTT	0
260
 
268
 
261
struct opregion_header;
269
struct opregion_header;
262
struct opregion_acpi;
270
struct opregion_acpi;
Line 305... Line 313...
305
 
313
 
Line 306... Line 314...
306
struct intel_display_error_state;
314
struct intel_display_error_state;
307
 
315
 
-
 
316
struct drm_i915_error_state {
-
 
317
	struct kref ref;
-
 
318
	struct timeval time;
-
 
319
 
-
 
320
	char error_msg[128];
-
 
321
	u32 reset_count;
-
 
322
	u32 suspend_count;
308
struct drm_i915_error_state {
323
 
309
	struct kref ref;
324
	/* Generic register state */
310
	u32 eir;
325
	u32 eir;
-
 
326
	u32 pgtbl_er;
311
	u32 pgtbl_er;
327
	u32 ier;
312
	u32 ier;
328
	u32 gtier[4];
313
	u32 ccid;
329
	u32 ccid;
314
	u32 derrmr;
-
 
315
	u32 forcewake;
-
 
316
	bool waiting[I915_NUM_RINGS];
-
 
317
	u32 pipestat[I915_MAX_PIPES];
-
 
318
	u32 tail[I915_NUM_RINGS];
-
 
319
	u32 head[I915_NUM_RINGS];
-
 
320
	u32 ctl[I915_NUM_RINGS];
-
 
321
	u32 ipeir[I915_NUM_RINGS];
-
 
322
	u32 ipehr[I915_NUM_RINGS];
-
 
323
	u32 instdone[I915_NUM_RINGS];
-
 
324
	u32 acthd[I915_NUM_RINGS];
-
 
325
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-
 
326
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-
 
327
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
-
 
328
	/* our own tracking of ring head and tail */
-
 
329
	u32 cpu_ring_head[I915_NUM_RINGS];
330
	u32 derrmr;
330
	u32 cpu_ring_tail[I915_NUM_RINGS];
331
	u32 forcewake;
331
	u32 error; /* gen6+ */
-
 
332
	u32 err_int; /* gen7 */
-
 
333
	u32 bbstate[I915_NUM_RINGS];
-
 
334
	u32 instpm[I915_NUM_RINGS];
-
 
335
	u32 instps[I915_NUM_RINGS];
-
 
336
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
-
 
337
	u32 seqno[I915_NUM_RINGS];
-
 
338
	u64 bbaddr[I915_NUM_RINGS];
332
	u32 error; /* gen6+ */
-
 
333
	u32 err_int; /* gen7 */
-
 
334
	u32 done_reg;
-
 
335
	u32 gac_eco;
-
 
336
	u32 gam_ecochk;
339
	u32 fault_reg[I915_NUM_RINGS];
337
	u32 gab_ctl;
340
	u32 done_reg;
338
	u32 gfx_mode;
-
 
339
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
341
	u32 faddr[I915_NUM_RINGS];
340
	u64 fence[I915_MAX_NUM_FENCES];
-
 
341
	struct intel_overlay_error_state *overlay;
342
	u64 fence[I915_MAX_NUM_FENCES];
342
	struct intel_display_error_state *display;
343
	struct timeval time;
343
 
-
 
344
	struct drm_i915_error_ring {
-
 
345
		bool valid;
-
 
346
		/* Software tracked state */
-
 
347
		bool waiting;
-
 
348
		int hangcheck_score;
-
 
349
		enum intel_ring_hangcheck_action hangcheck_action;
-
 
350
		int num_requests;
-
 
351
 
-
 
352
		/* our own tracking of ring head and tail */
-
 
353
		u32 cpu_ring_head;
-
 
354
		u32 cpu_ring_tail;
-
 
355
 
-
 
356
		u32 semaphore_seqno[I915_NUM_RINGS - 1];
-
 
357
 
-
 
358
		/* Register state */
-
 
359
		u32 tail;
-
 
360
		u32 head;
-
 
361
		u32 ctl;
-
 
362
		u32 hws;
-
 
363
		u32 ipeir;
-
 
364
		u32 ipehr;
-
 
365
		u32 instdone;
-
 
366
		u32 bbstate;
-
 
367
		u32 instpm;
-
 
368
		u32 instps;
-
 
369
		u32 seqno;
-
 
370
		u64 bbaddr;
-
 
371
		u64 acthd;
-
 
372
		u32 fault_reg;
-
 
373
		u64 faddr;
-
 
374
		u32 rc_psmi; /* sleep state */
344
	struct drm_i915_error_ring {
375
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
345
		bool valid;
376
 
346
	struct drm_i915_error_object {
377
	struct drm_i915_error_object {
347
		int page_count;
378
		int page_count;
348
		u32 gtt_offset;
379
		u32 gtt_offset;
-
 
380
		u32 *pages[0];
349
		u32 *pages[0];
381
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
350
		} *ringbuffer, *batchbuffer, *ctx;
382
 
351
		struct drm_i915_error_request {
383
		struct drm_i915_error_request {
352
			long jiffies;
384
			long jiffies;
353
			u32 seqno;
385
			u32 seqno;
-
 
386
			u32 tail;
-
 
387
		} *requests;
354
			u32 tail;
388
 
-
 
389
		struct {
-
 
390
			u32 gfx_mode;
-
 
391
			union {
-
 
392
				u64 pdp[4];
-
 
393
				u32 pp_dir_base;
-
 
394
			};
-
 
395
		} vm_info;
-
 
396
 
355
		} *requests;
397
		pid_t pid;
356
		int num_requests;
398
		char comm[TASK_COMM_LEN];
357
	} ring[I915_NUM_RINGS];
399
	} ring[I915_NUM_RINGS];
358
	struct drm_i915_error_buffer {
400
	struct drm_i915_error_buffer {
359
		u32 size;
401
		u32 size;
Line 365... Line 407...
365
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
407
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
366
		s32 pinned:2;
408
		s32 pinned:2;
367
		u32 tiling:2;
409
		u32 tiling:2;
368
		u32 dirty:1;
410
		u32 dirty:1;
369
		u32 purgeable:1;
411
		u32 purgeable:1;
-
 
412
		u32 userptr:1;
370
		s32 ring:4;
413
		s32 ring:4;
371
		u32 cache_level:3;
414
		u32 cache_level:3;
372
	} **active_bo, **pinned_bo;
415
	} **active_bo, **pinned_bo;
-
 
416
 
373
	u32 *active_bo_count, *pinned_bo_count;
417
	u32 *active_bo_count, *pinned_bo_count;
374
	struct intel_overlay_error_state *overlay;
-
 
375
	struct intel_display_error_state *display;
-
 
376
	int hangcheck_score[I915_NUM_RINGS];
-
 
377
	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
-
 
378
};
418
};
Line 379... Line 419...
379
 
419
 
380
struct intel_connector;
420
struct intel_connector;
-
 
421
struct intel_crtc_config;
381
struct intel_crtc_config;
422
struct intel_plane_config;
382
struct intel_crtc;
423
struct intel_crtc;
383
struct intel_limit;
424
struct intel_limit;
Line 384... Line 425...
384
struct dpll;
425
struct dpll;
Line 408... Line 449...
408
			  struct dpll *match_clock,
449
			  struct dpll *match_clock,
409
			  struct dpll *best_clock);
450
			  struct dpll *best_clock);
410
	void (*update_wm)(struct drm_crtc *crtc);
451
	void (*update_wm)(struct drm_crtc *crtc);
411
	void (*update_sprite_wm)(struct drm_plane *plane,
452
	void (*update_sprite_wm)(struct drm_plane *plane,
412
				 struct drm_crtc *crtc,
453
				 struct drm_crtc *crtc,
413
				 uint32_t sprite_width, int pixel_size,
454
				 uint32_t sprite_width, uint32_t sprite_height,
414
				 bool enable, bool scaled);
455
				 int pixel_size, bool enable, bool scaled);
415
	void (*modeset_global_resources)(struct drm_device *dev);
456
	void (*modeset_global_resources)(struct drm_device *dev);
416
	/* Returns the active state of the crtc, and if the crtc is active,
457
	/* Returns the active state of the crtc, and if the crtc is active,
417
	 * fills out the pipe-config with the hw state. */
458
	 * fills out the pipe-config with the hw state. */
418
	bool (*get_pipe_config)(struct intel_crtc *,
459
	bool (*get_pipe_config)(struct intel_crtc *,
419
				struct intel_crtc_config *);
460
				struct intel_crtc_config *);
-
 
461
	void (*get_plane_config)(struct intel_crtc *,
-
 
462
				 struct intel_plane_config *);
420
	int (*crtc_mode_set)(struct drm_crtc *crtc,
463
	int (*crtc_mode_set)(struct drm_crtc *crtc,
421
			     int x, int y,
464
			     int x, int y,
422
			     struct drm_framebuffer *old_fb);
465
			     struct drm_framebuffer *old_fb);
423
	void (*crtc_enable)(struct drm_crtc *crtc);
466
	void (*crtc_enable)(struct drm_crtc *crtc);
424
	void (*crtc_disable)(struct drm_crtc *crtc);
467
	void (*crtc_disable)(struct drm_crtc *crtc);
Line 429... Line 472...
429
	void (*fdi_link_train)(struct drm_crtc *crtc);
472
	void (*fdi_link_train)(struct drm_crtc *crtc);
430
	void (*init_clock_gating)(struct drm_device *dev);
473
	void (*init_clock_gating)(struct drm_device *dev);
431
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
474
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
432
			  struct drm_framebuffer *fb,
475
			  struct drm_framebuffer *fb,
433
			  struct drm_i915_gem_object *obj,
476
			  struct drm_i915_gem_object *obj,
-
 
477
			  struct intel_engine_cs *ring,
434
			  uint32_t flags);
478
			  uint32_t flags);
435
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
479
	void (*update_primary_plane)(struct drm_crtc *crtc,
-
 
480
				    struct drm_framebuffer *fb,
436
			    int x, int y);
481
			    int x, int y);
437
	void (*hpd_irq_setup)(struct drm_device *dev);
482
	void (*hpd_irq_setup)(struct drm_device *dev);
438
	/* clock updates for mode set */
483
	/* clock updates for mode set */
439
	/* cursor updates */
484
	/* cursor updates */
440
	/* render clock increase/decrease */
485
	/* render clock increase/decrease */
Line 479... Line 524...
479
	unsigned forcewake_count;
524
	unsigned forcewake_count;
Line 480... Line 525...
480
 
525
 
481
	unsigned fw_rendercount;
526
	unsigned fw_rendercount;
Line 482... Line 527...
482
	unsigned fw_mediacount;
527
	unsigned fw_mediacount;
483
 
528
 
Line 484... Line 529...
484
	struct delayed_work force_wake_work;
529
	struct timer_list force_wake_timer;
485
};
530
};
486
 
531
 
Line 514... Line 559...
514
#define SEP_SEMICOLON ;
559
#define SEP_SEMICOLON ;
Line 515... Line 560...
515
 
560
 
516
struct intel_device_info {
561
struct intel_device_info {
517
	u32 display_mmio_offset;
562
	u32 display_mmio_offset;
-
 
563
	u8 num_pipes:3;
518
	u8 num_pipes:3;
564
	u8 num_sprites[I915_MAX_PIPES];
519
	u8 gen;
565
	u8 gen;
520
	u8 ring_mask; /* Rings supported by the HW */
566
	u8 ring_mask; /* Rings supported by the HW */
-
 
567
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
-
 
568
	/* Register offsets for the various display pipes and transcoders */
-
 
569
	int pipe_offsets[I915_MAX_TRANSCODERS];
-
 
570
	int trans_offsets[I915_MAX_TRANSCODERS];
-
 
571
	int palette_offsets[I915_MAX_PIPES];
521
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
572
	int cursor_offsets[I915_MAX_PIPES];
Line 522... Line 573...
522
};
573
};
523
 
574
 
Line 532... Line 583...
532
			      large Last-Level-Cache. LLC is coherent with
583
			      large Last-Level-Cache. LLC is coherent with
533
			      the CPU, but L3 is only visible to the GPU. */
584
			      the CPU, but L3 is only visible to the GPU. */
534
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
585
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
535
};
586
};
Line 536... Line -...
536
 
-
 
537
typedef uint32_t gen6_gtt_pte_t;
-
 
538
 
-
 
539
struct i915_address_space {
-
 
540
	struct drm_mm mm;
-
 
541
	struct drm_device *dev;
-
 
542
	struct list_head global_link;
-
 
543
	unsigned long start;		/* Start offset always 0 for dri2 */
-
 
544
	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
-
 
545
 
-
 
546
	struct {
-
 
547
		dma_addr_t addr;
-
 
548
		struct page *page;
-
 
549
	} scratch;
-
 
550
 
-
 
551
	/**
-
 
552
	 * List of objects currently involved in rendering.
-
 
553
	 *
-
 
554
	 * Includes buffers having the contents of their GPU caches
-
 
555
	 * flushed, not necessarily primitives.  last_rendering_seqno
-
 
556
	 * represents when the rendering involved will be completed.
-
 
557
	 *
-
 
558
	 * A reference is held on the buffer while on this list.
-
 
559
	 */
-
 
560
	struct list_head active_list;
-
 
561
 
-
 
562
	/**
-
 
563
	 * LRU list of objects which are not in the ringbuffer and
-
 
564
	 * are ready to unbind, but are still in the GTT.
-
 
565
	 *
-
 
566
	 * last_rendering_seqno is 0 while an object is in this list.
-
 
567
	 *
-
 
568
	 * A reference is not held on the buffer while on this list,
-
 
569
	 * as merely being GTT-bound shouldn't prevent its being
-
 
570
	 * freed, and we'll pull it off the list in the free path.
-
 
571
	 */
-
 
572
	struct list_head inactive_list;
-
 
573
 
-
 
574
	/* FIXME: Need a more generic return type */
-
 
575
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
-
 
576
				     enum i915_cache_level level,
-
 
577
				     bool valid); /* Create a valid PTE */
-
 
578
	void (*clear_range)(struct i915_address_space *vm,
-
 
579
			    unsigned int first_entry,
-
 
580
			    unsigned int num_entries,
-
 
581
			    bool use_scratch);
-
 
582
	void (*insert_entries)(struct i915_address_space *vm,
-
 
583
			       struct sg_table *st,
-
 
584
			       unsigned int first_entry,
-
 
585
			       enum i915_cache_level cache_level);
-
 
586
	void (*cleanup)(struct i915_address_space *vm);
-
 
587
};
-
 
588
 
-
 
589
/* The Graphics Translation Table is the way in which GEN hardware translates a
-
 
590
 * Graphics Virtual Address into a Physical Address. In addition to the normal
-
 
591
 * collateral associated with any va->pa translations GEN hardware also has a
-
 
592
 * portion of the GTT which can be mapped by the CPU and remain both coherent
-
 
593
 * and correct (in cases like swizzling). That region is referred to as GMADR in
-
 
594
 * the spec.
-
 
595
 */
-
 
596
struct i915_gtt {
-
 
597
	struct i915_address_space base;
-
 
598
	size_t stolen_size;		/* Total size of stolen memory */
-
 
599
 
-
 
600
	unsigned long mappable_end;	/* End offset that we can CPU map */
-
 
601
	void *mappable;	/* Mapping to our CPU mappable region */
-
 
602
	phys_addr_t mappable_base;	/* PA of our GMADR */
-
 
603
 
-
 
604
	/** "Graphics Stolen Memory" holds the global PTEs */
-
 
605
	void __iomem *gsm;
-
 
606
 
-
 
607
	bool do_idle_maps;
-
 
608
 
-
 
609
	int mtrr;
-
 
610
 
-
 
611
	/* global gtt ops */
-
 
612
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
-
 
613
			  size_t *stolen, phys_addr_t *mappable_base,
-
 
614
			  unsigned long *mappable_end);
-
 
615
};
-
 
616
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-
 
617
 
-
 
618
struct i915_hw_ppgtt {
-
 
619
	struct i915_address_space base;
-
 
620
	unsigned num_pd_entries;
-
 
621
	union {
-
 
622
	struct page **pt_pages;
-
 
623
		struct page *gen8_pt_pages;
-
 
624
	};
-
 
625
    struct page **pd_pages;
-
 
626
	int num_pd_pages;
-
 
627
	int num_pt_pages;
-
 
628
	union {
-
 
629
	uint32_t pd_offset;
-
 
630
		dma_addr_t pd_dma_addr[4];
-
 
631
	};
-
 
632
	union {
-
 
633
	dma_addr_t *pt_dma_addr;
-
 
634
		dma_addr_t *gen8_pt_dma_addr[4];
-
 
635
	};
-
 
636
	int (*enable)(struct drm_device *dev);
-
 
637
};
-
 
638
 
-
 
639
/**
-
 
640
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
-
 
641
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
-
 
642
 * object into/from the address space.
-
 
643
 *
-
 
644
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
-
 
645
 * will always be <= an objects lifetime. So object refcounting should cover us.
-
 
646
 */
-
 
647
struct i915_vma {
-
 
648
	struct drm_mm_node node;
-
 
649
	struct drm_i915_gem_object *obj;
-
 
650
	struct i915_address_space *vm;
-
 
651
 
-
 
652
	/** This object's place on the active/inactive lists */
-
 
653
	struct list_head mm_list;
-
 
654
 
-
 
655
	struct list_head vma_link; /* Link in the object's VMA list */
-
 
656
 
-
 
657
	/** This vma's place in the batchbuffer or on the eviction list */
-
 
658
	struct list_head exec_list;
-
 
659
 
-
 
660
	/**
-
 
661
	 * Used for performing relocations during execbuffer insertion.
-
 
662
	 */
-
 
663
	struct hlist_node exec_node;
-
 
664
	unsigned long exec_handle;
-
 
665
	struct drm_i915_gem_exec_object2 *exec_entry;
-
 
666
 
-
 
667
};
-
 
668
 
587
 
669
struct i915_ctx_hang_stats {
588
struct i915_ctx_hang_stats {
670
	/* This context had batch pending when hang was declared */
589
	/* This context had batch pending when hang was declared */
Line 671... Line 590...
671
	unsigned batch_pending;
590
	unsigned batch_pending;
Line 679... Line 598...
679
	/* This context is banned to submit more work */
598
	/* This context is banned to submit more work */
680
	bool banned;
599
	bool banned;
681
};
600
};
Line 682... Line 601...
682
 
601
 
683
/* This must match up with the value previously used for execbuf2.rsvd1. */
602
/* This must match up with the value previously used for execbuf2.rsvd1. */
-
 
603
#define DEFAULT_CONTEXT_HANDLE 0
-
 
604
/**
-
 
605
 * struct intel_context - as the name implies, represents a context.
-
 
606
 * @ref: reference count.
-
 
607
 * @user_handle: userspace tracking identity for this context.
-
 
608
 * @remap_slice: l3 row remapping information.
-
 
609
 * @file_priv: filp associated with this context (NULL for global default
-
 
610
 *	       context).
-
 
611
 * @hang_stats: information about the role of this context in possible GPU
-
 
612
 *		hangs.
-
 
613
 * @vm: virtual memory space used by this context.
-
 
614
 * @legacy_hw_ctx: render context backing object and whether it is correctly
-
 
615
 *                initialized (legacy ring submission mechanism only).
-
 
616
 * @link: link in the global list of contexts.
-
 
617
 *
-
 
618
 * Contexts are memory images used by the hardware to store copies of their
-
 
619
 * internal state.
684
#define DEFAULT_CONTEXT_ID 0
620
 */
685
struct i915_hw_context {
621
struct intel_context {
686
	struct kref ref;
622
	struct kref ref;
687
	int id;
-
 
688
	bool is_initialized;
623
	int user_handle;
689
	uint8_t remap_slice;
624
	uint8_t remap_slice;
690
	struct drm_i915_file_private *file_priv;
-
 
691
	struct intel_ring_buffer *ring;
-
 
692
	struct drm_i915_gem_object *obj;
625
	struct drm_i915_file_private *file_priv;
-
 
626
	struct i915_ctx_hang_stats hang_stats;
-
 
627
	struct i915_address_space *vm;
-
 
628
 
-
 
629
	struct {
-
 
630
		struct drm_i915_gem_object *rcs_state;
-
 
631
		bool initialized;
Line 693... Line 632...
693
	struct i915_ctx_hang_stats hang_stats;
632
	} legacy_hw_ctx;
694
 
633
 
Line 695... Line 634...
695
	struct list_head link;
634
	struct list_head link;
696
};
635
};
-
 
636
 
697
 
637
struct i915_fbc {
698
struct i915_fbc {
638
	unsigned long size;
699
	unsigned long size;
639
	unsigned threshold;
Line 700... Line 640...
700
	unsigned int fb_id;
640
	unsigned int fb_id;
701
	enum plane plane;
641
	enum plane plane;
Line 702... Line 642...
702
	int y;
642
	int y;
703
 
643
 
704
	struct drm_mm_node *compressed_fb;
644
	struct drm_mm_node compressed_fb;
Line 723... Line 663...
723
	FBC_MODULE_PARAM,
663
	FBC_MODULE_PARAM,
724
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
664
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
725
	} no_fbc_reason;
665
	} no_fbc_reason;
726
};
666
};
Line -... Line 667...
-
 
667
 
-
 
668
struct i915_drrs {
-
 
669
	struct intel_connector *connector;
-
 
670
};
-
 
671
 
727
 
672
struct intel_dp;
-
 
673
struct i915_psr {
728
struct i915_psr {
674
	struct mutex lock;
729
	bool sink_support;
675
	bool sink_support;
-
 
676
	bool source_ok;
-
 
677
	struct intel_dp *enabled;
-
 
678
	bool active;
-
 
679
	struct delayed_work work;
730
	bool source_ok;
680
	unsigned busy_frontbuffer_bits;
Line 731... Line 681...
731
};
681
};
732
 
682
 
733
enum intel_pch {
683
enum intel_pch {
Line 744... Line 694...
744
};
694
};
Line 745... Line 695...
745
 
695
 
746
#define QUIRK_PIPEA_FORCE (1<<0)
696
#define QUIRK_PIPEA_FORCE (1<<0)
747
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
697
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
-
 
698
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
Line 748... Line 699...
748
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
699
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
749
 
700
 
Line 750... Line 701...
750
struct intel_fbdev;
701
struct intel_fbdev;
Line 841... Line 792...
841
	u32 savePP_CONTROL;
792
	u32 savePP_CONTROL;
842
	u32 savePP_DIVISOR;
793
	u32 savePP_DIVISOR;
843
	u32 savePFIT_CONTROL;
794
	u32 savePFIT_CONTROL;
844
	u32 save_palette_a[256];
795
	u32 save_palette_a[256];
845
	u32 save_palette_b[256];
796
	u32 save_palette_b[256];
846
	u32 saveDPFC_CB_BASE;
-
 
847
	u32 saveFBC_CFB_BASE;
-
 
848
	u32 saveFBC_LL_BASE;
-
 
849
	u32 saveFBC_CONTROL;
797
	u32 saveFBC_CONTROL;
850
	u32 saveFBC_CONTROL2;
-
 
851
	u32 saveIER;
798
	u32 saveIER;
852
	u32 saveIIR;
799
	u32 saveIIR;
853
	u32 saveIMR;
800
	u32 saveIMR;
854
	u32 saveDEIER;
801
	u32 saveDEIER;
855
	u32 saveDEIMR;
802
	u32 saveDEIMR;
Line 910... Line 857...
910
	u32 savePIPEB_LINK_N1;
857
	u32 savePIPEB_LINK_N1;
911
	u32 saveMCHBAR_RENDER_STANDBY;
858
	u32 saveMCHBAR_RENDER_STANDBY;
912
	u32 savePCH_PORT_HOTPLUG;
859
	u32 savePCH_PORT_HOTPLUG;
913
};
860
};
Line -... Line 861...
-
 
861
 
-
 
862
struct vlv_s0ix_state {
-
 
863
	/* GAM */
-
 
864
	u32 wr_watermark;
-
 
865
	u32 gfx_prio_ctrl;
-
 
866
	u32 arb_mode;
-
 
867
	u32 gfx_pend_tlb0;
-
 
868
	u32 gfx_pend_tlb1;
-
 
869
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
-
 
870
	u32 media_max_req_count;
-
 
871
	u32 gfx_max_req_count;
-
 
872
	u32 render_hwsp;
-
 
873
	u32 ecochk;
-
 
874
	u32 bsd_hwsp;
-
 
875
	u32 blt_hwsp;
-
 
876
	u32 tlb_rd_addr;
-
 
877
 
-
 
878
	/* MBC */
-
 
879
	u32 g3dctl;
-
 
880
	u32 gsckgctl;
-
 
881
	u32 mbctl;
-
 
882
 
-
 
883
	/* GCP */
-
 
884
	u32 ucgctl1;
-
 
885
	u32 ucgctl3;
-
 
886
	u32 rcgctl1;
-
 
887
	u32 rcgctl2;
-
 
888
	u32 rstctl;
-
 
889
	u32 misccpctl;
-
 
890
 
-
 
891
	/* GPM */
-
 
892
	u32 gfxpause;
-
 
893
	u32 rpdeuhwtc;
-
 
894
	u32 rpdeuc;
-
 
895
	u32 ecobus;
-
 
896
	u32 pwrdwnupctl;
-
 
897
	u32 rp_down_timeout;
-
 
898
	u32 rp_deucsw;
-
 
899
	u32 rcubmabdtmr;
-
 
900
	u32 rcedata;
-
 
901
	u32 spare2gh;
-
 
902
 
-
 
903
	/* Display 1 CZ domain */
-
 
904
	u32 gt_imr;
-
 
905
	u32 gt_ier;
-
 
906
	u32 pm_imr;
-
 
907
	u32 pm_ier;
-
 
908
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
-
 
909
 
-
 
910
	/* GT SA CZ domain */
-
 
911
	u32 tilectl;
-
 
912
	u32 gt_fifoctl;
-
 
913
	u32 gtlc_wake_ctrl;
-
 
914
	u32 gtlc_survive;
-
 
915
	u32 pmwgicz;
-
 
916
 
-
 
917
	/* Display 2 CZ domain */
-
 
918
	u32 gu_ctl0;
-
 
919
	u32 gu_ctl1;
-
 
920
	u32 clock_gate_dis2;
-
 
921
};
-
 
922
 
-
 
923
struct intel_rps_ei {
-
 
924
	u32 cz_clock;
-
 
925
	u32 render_c0;
-
 
926
	u32 media_c0;
-
 
927
};
914
 
928
 
915
struct intel_gen6_power_mgmt {
929
struct intel_gen6_power_mgmt {
916
	/* work and pm_iir are protected by dev_priv->irq_lock */
930
	/* work and pm_iir are protected by dev_priv->irq_lock */
917
	struct work_struct work;
931
	struct work_struct work;
Line -... Line 932...
-
 
932
	u32 pm_iir;
-
 
933
 
918
	u32 pm_iir;
934
	/* Frequencies are stored in potentially platform dependent multiples.
-
 
935
	 * In other words, *_freq needs to be multiplied by X to be interesting.
919
 
936
	 * Soft limits are those which are used for the dynamic reclocking done
920
	/* The below variables an all the rps hw state are protected by
937
	 * by the driver (raise frequencies under heavy loads, and lower for
-
 
938
	 * lighter loads). Hard limits are those imposed by the hardware.
-
 
939
	 *
921
	 * dev->struct mutext. */
940
	 * A distinction is made for overclocking, which is never enabled by
922
	u8 cur_delay;
941
	 * default, and is considered to be above the hard limit if it's
-
 
942
	 * possible at all.
-
 
943
	 */
-
 
944
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
-
 
945
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
-
 
946
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
-
 
947
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
923
	u8 min_delay;
948
	u8 min_freq;		/* AKA RPn. Minimum frequency */
924
	u8 max_delay;
949
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
925
	u8 rpe_delay;
950
	u8 rp1_freq;		/* "less than" RP0 power/freqency */
-
 
951
	u8 rp0_freq;		/* Non-overclocked max frequency. */
926
	u8 rp1_delay;
952
	u32 cz_freq;
Line 927... Line 953...
927
	u8 rp0_delay;
953
 
928
	u8 hw_max;
954
	u32 ei_interrupt_count;
Line 929... Line 955...
929
 
955
 
930
	int last_adj;
956
	int last_adj;
Line -... Line 957...
-
 
957
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
-
 
958
 
-
 
959
	bool enabled;
931
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
960
	struct delayed_work delayed_resume_work;
932
 
961
 
933
	bool enabled;
962
	/* manual wa residency calculations */
934
	struct delayed_work delayed_resume_work;
963
	struct intel_rps_ei up_ei, down_ei;
935
 
964
 
Line 952... Line 981...
952
 
981
 
953
	u64 last_count1;
982
	u64 last_count1;
954
	unsigned long last_time1;
983
	unsigned long last_time1;
955
	unsigned long chipset_power;
984
	unsigned long chipset_power;
956
	u64 last_count2;
985
	u64 last_count2;
957
	struct timespec last_time2;
986
	u64 last_time2;
958
	unsigned long gfx_power;
987
	unsigned long gfx_power;
Line 959... Line 988...
959
	u8 corr;
988
	u8 corr;
960
 
989
 
Line 961... Line 990...
961
	int c_m;
990
	int c_m;
962
	int r_t;
991
	int r_t;
963
 
992
 
Line -... Line 993...
-
 
993
	struct drm_i915_gem_object *pwrctx;
-
 
994
	struct drm_i915_gem_object *renderctx;
-
 
995
};
-
 
996
 
-
 
997
struct drm_i915_private;
-
 
998
struct i915_power_well;
-
 
999
 
-
 
1000
struct i915_power_well_ops {
-
 
1001
	/*
-
 
1002
	 * Synchronize the well's hw state to match the current sw state, for
-
 
1003
	 * example enable/disable it based on the current refcount. Called
-
 
1004
	 * during driver init and resume time, possibly after first calling
-
 
1005
	 * the enable/disable handlers.
-
 
1006
	 */
-
 
1007
	void (*sync_hw)(struct drm_i915_private *dev_priv,
-
 
1008
			struct i915_power_well *power_well);
-
 
1009
	/*
-
 
1010
	 * Enable the well and resources that depend on it (for example
-
 
1011
	 * interrupts located on the well). Called after the 0->1 refcount
-
 
1012
	 * transition.
-
 
1013
	 */
-
 
1014
	void (*enable)(struct drm_i915_private *dev_priv,
-
 
1015
		       struct i915_power_well *power_well);
-
 
1016
	/*
-
 
1017
	 * Disable the well and resources that depend on it. Called after
-
 
1018
	 * the 1->0 refcount transition.
-
 
1019
	 */
-
 
1020
	void (*disable)(struct drm_i915_private *dev_priv,
-
 
1021
			struct i915_power_well *power_well);
-
 
1022
	/* Returns the hw enabled state. */
964
	struct drm_i915_gem_object *pwrctx;
1023
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
965
	struct drm_i915_gem_object *renderctx;
1024
			   struct i915_power_well *power_well);
966
};
1025
};
967
 
1026
 
968
/* Power well structure for haswell */
1027
/* Power well structure for haswell */
969
struct i915_power_well {
1028
struct i915_power_well {
-
 
1029
	const char *name;
-
 
1030
	bool always_on;
970
	const char *name;
1031
	/* power well enable/disable usage count */
971
	bool always_on;
1032
	int count;
972
	/* power well enable/disable usage count */
-
 
973
	int count;
-
 
974
	unsigned long domains;
-
 
975
	void *data;
1033
	/* cached hw enabled state */
976
	void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
1034
	bool hw_enabled;
Line 977... Line 1035...
977
		    bool enable);
1035
	unsigned long domains;
978
	bool (*is_enabled)(struct drm_device *dev,
1036
	unsigned long data;
979
			   struct i915_power_well *power_well);
1037
	const struct i915_power_well_ops *ops;
980
};
1038
};
981
 
1039
 
982
struct i915_power_domains {
1040
struct i915_power_domains {
-
 
1041
	/*
983
	/*
1042
	 * Power wells needed for initialization at driver init and suspend
Line 984... Line 1043...
984
	 * Power wells needed for initialization at driver init and suspend
1043
	 * time are on. They are kept on until after the first modeset.
985
	 * time are on. They are kept on until after the first modeset.
1044
	 */
986
	 */
1045
	bool init_power_on;
Line 1068... Line 1127...
1068
	 * Are we in a non-interruptible section of code like
1127
	 * Are we in a non-interruptible section of code like
1069
	 * modesetting?
1128
	 * modesetting?
1070
	 */
1129
	 */
1071
	bool interruptible;
1130
	bool interruptible;
Line -... Line 1131...
-
 
1131
 
-
 
1132
	/**
-
 
1133
	 * Is the GPU currently considered idle, or busy executing userspace
-
 
1134
	 * requests?  Whilst idle, we attempt to power down the hardware and
-
 
1135
	 * display clocks. In order to reduce the effect on performance, there
-
 
1136
	 * is a slight delay before we do so.
-
 
1137
	 */
-
 
1138
	bool busy;
-
 
1139
 
-
 
1140
	/* the indicator for dispatch video commands on two BSD rings */
-
 
1141
	int bsd_ring_dispatch_index;
1072
 
1142
 
1073
	/** Bit 6 swizzling required for X tiling */
1143
	/** Bit 6 swizzling required for X tiling */
1074
	uint32_t bit_6_swizzle_x;
1144
	uint32_t bit_6_swizzle_x;
1075
	/** Bit 6 swizzling required for Y tiling */
1145
	/** Bit 6 swizzling required for Y tiling */
Line 1076... Line -...
1076
	uint32_t bit_6_swizzle_y;
-
 
1077
 
-
 
1078
	/* storage for physical objects */
-
 
1079
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
1146
	uint32_t bit_6_swizzle_y;
1080
 
1147
 
1081
	/* accounting, useful for userland debugging */
1148
	/* accounting, useful for userland debugging */
1082
	spinlock_t object_stat_lock;
1149
	spinlock_t object_stat_lock;
1083
	size_t object_memory;
1150
	size_t object_memory;
Line 1146... Line 1213...
1146
	 * Waitqueue to signal when the reset has completed. Used by clients
1213
	 * Waitqueue to signal when the reset has completed. Used by clients
1147
	 * that wait for dev_priv->mm.wedged to settle.
1214
	 * that wait for dev_priv->mm.wedged to settle.
1148
	 */
1215
	 */
1149
	wait_queue_head_t reset_queue;
1216
	wait_queue_head_t reset_queue;
Line 1150... Line 1217...
1150
 
1217
 
-
 
1218
	/* Userspace knobs for gpu hang simulation;
-
 
1219
	 * combines both a ring mask, and extra flags
1151
	/* For gpu hang simulation. */
1220
	 */
-
 
1221
	u32 stop_rings;
-
 
1222
#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
Line 1152... Line 1223...
1152
	unsigned int stop_rings;
1223
#define I915_STOP_RING_ALLOW_WARN      (1 << 30)
1153
 
1224
 
1154
	/* For missed irq/seqno simulation. */
1225
	/* For missed irq/seqno simulation. */
Line 1167... Line 1238...
1167
	uint8_t supports_dvi:1;
1238
	uint8_t supports_dvi:1;
1168
	uint8_t supports_hdmi:1;
1239
	uint8_t supports_hdmi:1;
1169
	uint8_t supports_dp:1;
1240
	uint8_t supports_dp:1;
1170
};
1241
};
Line -... Line 1242...
-
 
1242
 
-
 
1243
enum drrs_support_type {
-
 
1244
	DRRS_NOT_SUPPORTED = 0,
-
 
1245
	STATIC_DRRS_SUPPORT = 1,
-
 
1246
	SEAMLESS_DRRS_SUPPORT = 2
-
 
1247
};
1171
 
1248
 
1172
struct intel_vbt_data {
1249
struct intel_vbt_data {
1173
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1250
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
Line 1174... Line 1251...
1174
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1251
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
Line 1179... Line 1256...
1179
	unsigned int lvds_vbt:1;
1256
	unsigned int lvds_vbt:1;
1180
	unsigned int int_crt_support:1;
1257
	unsigned int int_crt_support:1;
1181
	unsigned int lvds_use_ssc:1;
1258
	unsigned int lvds_use_ssc:1;
1182
	unsigned int display_clock_mode:1;
1259
	unsigned int display_clock_mode:1;
1183
	unsigned int fdi_rx_polarity_inverted:1;
1260
	unsigned int fdi_rx_polarity_inverted:1;
-
 
1261
	unsigned int has_mipi:1;
1184
	int lvds_ssc_freq;
1262
	int lvds_ssc_freq;
1185
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1263
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
Line -... Line 1264...
-
 
1264
 
-
 
1265
	enum drrs_support_type drrs_type;
1186
 
1266
 
1187
	/* eDP */
1267
	/* eDP */
1188
	int edp_rate;
1268
	int edp_rate;
1189
	int edp_lanes;
1269
	int edp_lanes;
1190
	int edp_preemphasis;
1270
	int edp_preemphasis;
Line 1194... Line 1274...
1194
	int edp_bpp;
1274
	int edp_bpp;
1195
	struct edp_power_seq edp_pps;
1275
	struct edp_power_seq edp_pps;
Line 1196... Line 1276...
1196
 
1276
 
1197
	struct {
1277
	struct {
-
 
1278
		u16 pwm_freq_hz;
1198
		u16 pwm_freq_hz;
1279
		bool present;
-
 
1280
		bool active_low_pwm;
1199
		bool active_low_pwm;
1281
		u8 min_brightness;	/* min_brightness/255 of max */
Line 1200... Line 1282...
1200
	} backlight;
1282
	} backlight;
1201
 
1283
 
-
 
1284
	/* MIPI DSI */
1202
	/* MIPI DSI */
1285
	struct {
-
 
1286
		u16 port;
-
 
1287
		u16 panel_id;
-
 
1288
		struct mipi_config *config;
-
 
1289
		struct mipi_pps_data *pps;
-
 
1290
		u8 seq_version;
-
 
1291
		u32 size;
1203
	struct {
1292
		u8 *data;
Line 1204... Line 1293...
1204
		u16 panel_id;
1293
		u8 *sequence[MIPI_SEQ_MAX];
Line 1205... Line 1294...
1205
	} dsi;
1294
	} dsi;
Line 1233... Line 1322...
1233
	bool enable_fbc_wm;
1322
	bool enable_fbc_wm;
1234
	enum intel_ddb_partitioning partitioning;
1323
	enum intel_ddb_partitioning partitioning;
1235
};
1324
};
Line 1236... Line 1325...
1236
 
1325
 
1237
/*
1326
/*
1238
 * This struct tracks the state needed for the Package C8+ feature.
-
 
1239
 *
-
 
1240
 * Package states C8 and deeper are really deep PC states that can only be
-
 
1241
 * reached when all the devices on the system allow it, so even if the graphics
1327
 * This struct helps tracking the state needed for runtime PM, which puts the
1242
 * device allows PC8+, it doesn't mean the system will actually get to these
-
 
1243
 * states.
-
 
1244
 *
-
 
1245
 * Our driver only allows PC8+ when all the outputs are disabled, the power well
-
 
1246
 * is disabled and the GPU is idle. When these conditions are met, we manually
-
 
1247
 * do the other conditions: disable the interrupts, clocks and switch LCPLL
-
 
1248
 * refclk to Fclk.
-
 
1249
 *
-
 
1250
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
-
 
1251
 * the state of some registers, so when we come back from PC8+ we need to
-
 
1252
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
-
 
1253
 * need to take care of the registers kept by RC6.
-
 
1254
 *
-
 
1255
 * The interrupt disabling is part of the requirements. We can only leave the
1328
 * device in PCI D3 state. Notice that when this happens, nothing on the
1256
 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
1329
 * graphics device works, even register access, so we don't get interrupts nor
1257
 * can lock the machine.
1330
 * anything else.
1258
 *
1331
 *
1259
 * Ideally every piece of our code that needs PC8+ disabled would call
-
 
1260
 * hsw_disable_package_c8, which would increment disable_count and prevent the
-
 
1261
 * system from reaching PC8+. But we don't have a symmetric way to do this for
-
 
1262
 * everything, so we have the requirements_met and gpu_idle variables. When we
-
 
1263
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
-
 
1264
 * increase it in the opposite case. The requirements_met variable is true when
1332
 * Every piece of our code that needs to actually touch the hardware needs to
1265
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
1333
 * either call intel_runtime_pm_get or call intel_display_power_get with the
1266
 * variable is true when the GPU is idle.
1334
 * appropriate power domain.
1267
 *
-
 
1268
 * In addition to everything, we only actually enable PC8+ if disable_count
-
 
1269
 * stays at zero for at least some seconds. This is implemented with the
-
 
1270
 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
1335
 *
1271
 * consecutive times when all screens are disabled and some background app
1336
 * Our driver uses the autosuspend delay feature, which means we'll only really
1272
 * queries the state of our connectors, or we have some application constantly
1337
 * suspend if we stay with zero refcount for a certain amount of time. The
1273
 * waking up to use the GPU. Only after the enable_work function actually
1338
 * default value is currently very conservative (see intel_init_runtime_pm), but
1274
 * enables PC8+ the "enable" variable will become true, which means that it can
-
 
1275
 * be false even if disable_count is 0.
1339
 * it can be changed with the standard runtime PM files from sysfs.
1276
 *
1340
 *
1277
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
1341
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
1278
 * goes back to false exactly before we reenable the IRQs. We use this variable
1342
 * goes back to false exactly before we reenable the IRQs. We use this variable
1279
 * to check if someone is trying to enable/disable IRQs while they're supposed
1343
 * to check if someone is trying to enable/disable IRQs while they're supposed
1280
 * to be disabled. This shouldn't happen and we'll print some error messages in
-
 
1281
 * case it happens, but if it actually happens we'll also update the variables
-
 
1282
 * inside struct regsave so when we restore the IRQs they will contain the
1344
 * to be disabled. This shouldn't happen and we'll print some error messages in
1283
 * latest expected values.
1345
 * case it happens.
1284
 *
1346
 *
1285
 * For more, read "Display Sequences for Package C8" on our documentation.
1347
 * For more, read the Documentation/power/runtime_pm.txt.
1286
 */
-
 
1287
struct i915_package_c8 {
-
 
1288
	bool requirements_met;
-
 
1289
	bool gpu_idle;
-
 
1290
	bool irqs_disabled;
-
 
1291
	/* Only true after the delayed work task actually enables it. */
-
 
1292
	bool enabled;
-
 
1293
	int disable_count;
-
 
1294
	struct mutex lock;
-
 
1295
	struct delayed_work enable_work;
-
 
1296
 
-
 
1297
	struct {
-
 
1298
		uint32_t deimr;
-
 
1299
		uint32_t sdeimr;
-
 
1300
		uint32_t gtimr;
-
 
1301
		uint32_t gtier;
-
 
1302
		uint32_t gen6_pmimr;
-
 
1303
	} regsave;
-
 
1304
};
-
 
1305
 
1348
 */
1306
struct i915_runtime_pm {
1349
struct i915_runtime_pm {
-
 
1350
	bool suspended;
1307
	bool suspended;
1351
	bool _irqs_disabled;
Line 1308... Line 1352...
1308
};
1352
};
1309
 
1353
 
1310
enum intel_pipe_crc_source {
1354
enum intel_pipe_crc_source {
Line 1335... Line 1379...
1335
	enum intel_pipe_crc_source source;
1379
	enum intel_pipe_crc_source source;
1336
	int head, tail;
1380
	int head, tail;
1337
	wait_queue_head_t wq;
1381
	wait_queue_head_t wq;
1338
};
1382
};
Line -... Line 1383...
-
 
1383
 
-
 
1384
struct i915_frontbuffer_tracking {
-
 
1385
	struct mutex lock;
-
 
1386
 
-
 
1387
	/*
-
 
1388
	 * Tracking bits for delayed frontbuffer flushing du to gpu activity or
-
 
1389
	 * scheduled flips.
-
 
1390
	 */
-
 
1391
	unsigned busy_bits;
-
 
1392
	unsigned flip_bits;
-
 
1393
};
1339
 
1394
 
1340
typedef struct drm_i915_private {
1395
struct drm_i915_private {
Line 1341... Line 1396...
1341
	struct drm_device *dev;
1396
	struct drm_device *dev;
Line 1342... Line 1397...
1342
 
1397
 
Line 1343... Line 1398...
1343
	const struct intel_device_info *info;
1398
	const struct intel_device_info info;
Line 1358... Line 1413...
1358
	/**
1413
	/**
1359
	 * Base address of the gmbus and gpio block.
1414
	 * Base address of the gmbus and gpio block.
1360
	 */
1415
	 */
1361
	uint32_t gpio_mmio_base;
1416
	uint32_t gpio_mmio_base;
Line -... Line 1417...
-
 
1417
 
-
 
1418
	/* MMIO base address for MIPI regs */
-
 
1419
	uint32_t mipi_mmio_base;
1362
 
1420
 
Line 1363... Line 1421...
1363
	wait_queue_head_t gmbus_wait_queue;
1421
	wait_queue_head_t gmbus_wait_queue;
1364
 
1422
 
-
 
1423
	struct pci_dev *bridge_dev;
1365
	struct pci_dev *bridge_dev;
1424
	struct intel_engine_cs ring[I915_NUM_RINGS];
Line 1366... Line 1425...
1366
	struct intel_ring_buffer ring[I915_NUM_RINGS];
1425
	struct drm_i915_gem_object *semaphore_obj;
1367
	uint32_t last_seqno, next_seqno;
1426
	uint32_t last_seqno, next_seqno;
Line 1368... Line -...
1368
 
-
 
1369
	drm_dma_handle_t *status_page_dmah;
-
 
1370
	struct resource mch_res;
1427
 
1371
 
1428
	drm_dma_handle_t *status_page_dmah;
Line -... Line 1429...
-
 
1429
	struct resource mch_res;
-
 
1430
 
-
 
1431
	/* protects the irq masks */
-
 
1432
	spinlock_t irq_lock;
-
 
1433
 
1372
	atomic_t irq_received;
1434
	/* protects the mmio flip data */
1373
 
1435
	spinlock_t mmio_flip_lock;
Line 1374... Line 1436...
1374
	/* protects the irq masks */
1436
 
1375
	spinlock_t irq_lock;
1437
	bool display_irqs_enabled;
Line 1385... Line 1447...
1385
	u32 irq_mask;
1447
	u32 irq_mask;
1386
		u32 de_irq_mask[I915_MAX_PIPES];
1448
		u32 de_irq_mask[I915_MAX_PIPES];
1387
	};
1449
	};
1388
	u32 gt_irq_mask;
1450
	u32 gt_irq_mask;
1389
	u32 pm_irq_mask;
1451
	u32 pm_irq_mask;
-
 
1452
	u32 pm_rps_events;
-
 
1453
	u32 pipestat_irq_mask[I915_MAX_PIPES];
Line 1390... Line 1454...
1390
 
1454
 
1391
	struct work_struct hotplug_work;
-
 
1392
	bool enable_hotplug_processing;
1455
	struct work_struct hotplug_work;
1393
	struct {
1456
	struct {
1394
		unsigned long hpd_last_jiffies;
1457
		unsigned long hpd_last_jiffies;
1395
		int hpd_cnt;
1458
		int hpd_cnt;
1396
		enum {
1459
		enum {
1397
			HPD_ENABLED = 0,
1460
			HPD_ENABLED = 0,
1398
			HPD_DISABLED = 1,
1461
			HPD_DISABLED = 1,
1399
			HPD_MARK_DISABLED = 2
1462
			HPD_MARK_DISABLED = 2
1400
		} hpd_mark;
1463
		} hpd_mark;
1401
	} hpd_stats[HPD_NUM_PINS];
1464
	} hpd_stats[HPD_NUM_PINS];
1402
	u32 hpd_event_bits;
1465
	u32 hpd_event_bits;
1403
	struct timer_list hotplug_reenable_timer;
-
 
1404
 
-
 
Line 1405... Line 1466...
1405
	int num_plane;
1466
	struct delayed_work hotplug_reenable_work;
-
 
1467
 
1406
 
1468
	struct i915_fbc fbc;
1407
	struct i915_fbc fbc;
1469
	struct i915_drrs drrs;
Line 1408... Line 1470...
1408
	struct intel_opregion opregion;
1470
	struct intel_opregion opregion;
1409
	struct intel_vbt_data vbt;
1471
	struct intel_vbt_data vbt;
Line 1420... Line 1482...
1420
	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1482
	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1421
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1483
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1422
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1484
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
Line 1423... Line 1485...
1423
 
1485
 
-
 
1486
	unsigned int fsb_freq, mem_freq, is_ddr3;
Line 1424... Line 1487...
1424
	unsigned int fsb_freq, mem_freq, is_ddr3;
1487
	unsigned int vlv_cdclk_freq;
1425
 
1488
 
1426
	/**
1489
	/**
1427
	 * wq - Driver workqueue for GEM.
1490
	 * wq - Driver workqueue for GEM.
Line 1443... Line 1506...
1443
 
1506
 
1444
	enum modeset_restore modeset_restore;
1507
	enum modeset_restore modeset_restore;
Line 1445... Line 1508...
1445
	struct mutex modeset_restore_lock;
1508
	struct mutex modeset_restore_lock;
1446
 
1509
 
Line 1447... Line 1510...
1447
	struct list_head vm_list; /* Global list of all address spaces */
1510
	struct list_head vm_list; /* Global list of all address spaces */
-
 
1511
	struct i915_gtt gtt; /* VM representing the global address space */
-
 
1512
 
-
 
1513
	struct i915_gem_mm mm;
Line 1448... Line 1514...
1448
	struct i915_gtt gtt; /* VMA representing the global address space */
1514
#if defined(CONFIG_MMU_NOTIFIER)
Line 1449... Line 1515...
1449
 
1515
	DECLARE_HASHTABLE(mmu_notifiers, 7);
Line 1450... Line 1516...
1450
	struct i915_gem_mm mm;
1516
#endif
1451
 
1517
 
1452
	/* Kernel Modesetting */
1518
	/* Kernel Modesetting */
Line 1453... Line 1519...
1453
 
1519
 
1454
    struct sdvo_device_mapping sdvo_mappings[2];
1520
    struct sdvo_device_mapping sdvo_mappings[2];
1455
 
1521
 
Line 1456... Line 1522...
1456
    struct drm_crtc *plane_to_crtc_mapping[3];
1522
	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1457
    struct drm_crtc *pipe_to_crtc_mapping[3];
1523
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
1458
	wait_queue_head_t pending_flip_queue;
-
 
1459
 
1524
	wait_queue_head_t pending_flip_queue;
Line 1460... Line 1525...
1460
#ifdef CONFIG_DEBUG_FS
1525
 
1461
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1526
#ifdef CONFIG_DEBUG_FS
1462
#endif
1527
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1463
 
1528
#endif
1464
	int num_shared_dpll;
1529
 
-
 
1530
	int num_shared_dpll;
-
 
1531
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
-
 
1532
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1465
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1533
 
Line 1466... Line 1534...
1466
	struct intel_ddi_plls ddi_plls;
1534
	/* Reclocking support */
Line 1467... Line 1535...
1467
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1535
	bool render_reclock_avail;
Line 1512... Line 1580...
1512
	uint32_t hw_context_size;
1580
	uint32_t hw_context_size;
1513
	struct list_head context_list;
1581
	struct list_head context_list;
Line 1514... Line 1582...
1514
 
1582
 
Line -... Line 1583...
-
 
1583
	u32 fdi_rx_config;
1515
	u32 fdi_rx_config;
1584
 
-
 
1585
	u32 suspend_count;
Line 1516... Line 1586...
1516
 
1586
	struct i915_suspend_saved_registers regfile;
1517
	struct i915_suspend_saved_registers regfile;
1587
	struct vlv_s0ix_state vlv_s0ix_state;
1518
 
1588
 
1519
	struct {
1589
	struct {
Line 1531... Line 1601...
1531
 
1601
 
1532
		/* current hardware state */
1602
		/* current hardware state */
1533
		struct ilk_wm_values hw;
1603
		struct ilk_wm_values hw;
Line 1534... Line -...
1534
	} wm;
-
 
1535
 
-
 
1536
	struct i915_package_c8 pc8;
1604
	} wm;
Line -... Line 1605...
-
 
1605
 
-
 
1606
	struct i915_runtime_pm pm;
-
 
1607
 
-
 
1608
	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
-
 
1609
	u32 long_hpd_port_mask;
-
 
1610
	u32 short_hpd_port_mask;
-
 
1611
	struct work_struct dig_port_work;
-
 
1612
 
-
 
1613
	/*
-
 
1614
	 * if we get a HPD irq from DP and a HPD irq from non-DP
-
 
1615
	 * the non-DP HPD could block the workqueue on a mode config
-
 
1616
	 * mutex getting, that userspace may have taken. However
-
 
1617
	 * userspace is waiting on the DP workqueue to run which is
-
 
1618
	 * blocked behind the non-DP one.
1537
 
1619
	 */
1538
	struct i915_runtime_pm pm;
1620
	struct workqueue_struct *dp_wq;
1539
 
1621
 
1540
	/* Old dri1 support infrastructure, beware the dragons ya fools entering
1622
	/* Old dri1 support infrastructure, beware the dragons ya fools entering
1541
	 * here! */
1623
	 * here! */
-
 
1624
	struct i915_dri1_state dri1;
-
 
1625
	/* Old ums support infrastructure, same warning applies. */
-
 
1626
	struct i915_ums_state ums;
1542
	struct i915_dri1_state dri1;
1627
 
-
 
1628
	/*
-
 
1629
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
Line 1543... Line 1630...
1543
	/* Old ums support infrastructure, same warning applies. */
1630
	 * will be rejected. Instead look for a better place.
1544
	struct i915_ums_state ums;
1631
	 */
1545
} drm_i915_private_t;
1632
};
1546
 
1633
 
Line 1577... Line 1664...
1577
	 * being released or under memory pressure (where we attempt to
1664
	 * being released or under memory pressure (where we attempt to
1578
	 * reap pages for the shrinker).
1665
	 * reap pages for the shrinker).
1579
	 */
1666
	 */
1580
	int (*get_pages)(struct drm_i915_gem_object *);
1667
	int (*get_pages)(struct drm_i915_gem_object *);
1581
	void (*put_pages)(struct drm_i915_gem_object *);
1668
	void (*put_pages)(struct drm_i915_gem_object *);
-
 
1669
	int (*dmabuf_export)(struct drm_i915_gem_object *);
-
 
1670
	void (*release)(struct drm_i915_gem_object *);
1582
};
1671
};
Line -... Line 1672...
-
 
1672
 
-
 
1673
/*
-
 
1674
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
-
 
1675
 * considered to be the frontbuffer for the given plane interface-vise. This
-
 
1676
 * doesn't mean that the hw necessarily already scans it out, but that any
-
 
1677
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
-
 
1678
 *
-
 
1679
 * We have one bit per pipe and per scanout plane type.
-
 
1680
 */
-
 
1681
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
-
 
1682
#define INTEL_FRONTBUFFER_BITS \
-
 
1683
	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
-
 
1684
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
-
 
1685
	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
-
 
1686
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
-
 
1687
	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
-
 
1688
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
-
 
1689
	(1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
-
 
1690
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
-
 
1691
	(1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
-
 
1692
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
-
 
1693
	(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
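A quick worked example of how these bits compose (illustrative only, using the enum pipe values from earlier in this header): for PIPE_B the four plane bits occupy positions 4..7.

	/* Sketch: PIPE_B == 1, so its primary plane is bit 4 (0x10) and its
	 * cursor is bit 5 (0x20); together 0x30, a subset of ALL_MASK(PIPE_B). */
	unsigned int fb_bits = INTEL_FRONTBUFFER_PRIMARY(PIPE_B) |
			       INTEL_FRONTBUFFER_CURSOR(PIPE_B);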
1583
 
1694
 
1584
struct drm_i915_gem_object {
1695
struct drm_i915_gem_object {
Line 1585... Line 1696...
1585
    struct drm_gem_object base;
1696
    struct drm_gem_object base;
Line 1633... Line 1744...
1633
	 * slot that the object occupies whilst it executes a fenced
1744
	 * slot that the object occupies whilst it executes a fenced
1634
	 * command (such as BLT on gen2/3), as a "fence".
1745
	 * command (such as BLT on gen2/3), as a "fence".
1635
	 */
1746
	 */
1636
	unsigned int fence_dirty:1;
1747
	unsigned int fence_dirty:1;
Line 1637... Line -...
1637
 
-
 
1638
    /** How many users have pinned this object in GTT space. The following
-
 
1639
     * users can each hold at most one reference: pwrite/pread, pin_ioctl
-
 
1640
     * (via user_pin_count), execbuffer (objects are not allowed multiple
-
 
1641
     * times for the same batchbuffer), and the framebuffer code. When
-
 
1642
     * switching/pageflipping, the framebuffer code has at most two buffers
-
 
1643
     * pinned per crtc.
-
 
1644
     *
-
 
1645
     * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-
 
1646
     * bits with absolutely no headroom. So use 4 bits. */
-
 
1647
	unsigned int pin_count:4;
-
 
1648
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
 
1649
 
1748
 
1650
    /**
1749
    /**
1651
     * Is the object at the current location in the gtt mappable and
1750
     * Is the object at the current location in the gtt mappable and
1652
     * fenceable? Used to avoid costly recalculations.
1751
     * fenceable? Used to avoid costly recalculations.
1653
     */
1752
     */
Line 1661... Line 1760...
1661
	unsigned int fault_mappable:1;
1760
	unsigned int fault_mappable:1;
1662
	unsigned int pin_mappable:1;
1761
	unsigned int pin_mappable:1;
1663
	unsigned int pin_display:1;
1762
	unsigned int pin_display:1;
Line 1664... Line 1763...
1664
 
1763
 
-
 
1764
    /*
-
 
1765
	 * Is the object to be mapped as read-only to the GPU?
-
 
1766
	 * Only honoured if the hardware has the relevant pte bit.
-
 
1767
	 */
-
 
1768
	unsigned long gt_ro:1;
-
 
1769
 
1665
    /*
1770
	/*
1666
     * Is the GPU currently using a fence to access this buffer?
1771
     * Is the GPU currently using a fence to access this buffer?
1667
     */
1772
     */
1668
    unsigned int pending_fenced_gpu_access:1;
1773
    unsigned int pending_fenced_gpu_access:1;
Line 1672... Line 1777...
1672
 
1777
 
1673
	unsigned int has_aliasing_ppgtt_mapping:1;
1778
	unsigned int has_aliasing_ppgtt_mapping:1;
1674
	unsigned int has_global_gtt_mapping:1;
1779
	unsigned int has_global_gtt_mapping:1;
Line -... Line 1780...
-
 
1780
	unsigned int has_dma_mapping:1;
-
 
1781
 
1675
	unsigned int has_dma_mapping:1;
1782
	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
1676
 
1783
 
Line 1677... Line 1784...
1677
	struct sg_table *pages;
1784
	struct sg_table *pages;
1678
	int pages_pin_count;
1785
	int pages_pin_count;
1679
 
1786
 
Line 1680... Line 1787...
1680
	/* prime dma-buf support */
1787
	/* prime dma-buf support */
Line 1681... Line 1788...
1681
	void *dma_buf_vmapping;
1788
	void *dma_buf_vmapping;
1682
	int vmapping_count;
1789
	int vmapping_count;
1683
 
1790
 
1684
	struct intel_ring_buffer *ring;
1791
	struct intel_engine_cs *ring;
Line 1701... Line 1808...
1701
    /** User space pin count and filp owning the pin */
1808
    /** User space pin count and filp owning the pin */
1702
	unsigned long user_pin_count;
1809
	unsigned long user_pin_count;
1703
    struct drm_file *pin_filp;
1810
    struct drm_file *pin_filp;
Line 1704... Line 1811...
1704
 
1811
 
1705
    /** for phy allocated objects */
1812
    /** for phy allocated objects */
1706
    struct drm_i915_gem_phys_object *phys_obj;
-
 
1707
};
-
 
Line -... Line 1813...
-
 
1813
	drm_dma_handle_t *phys_handle;
-
 
1814
 
-
 
1815
	union {
-
 
1816
		struct i915_gem_userptr {
-
 
1817
			uintptr_t ptr;
-
 
1818
			unsigned read_only :1;
-
 
1819
			unsigned workers :4;
-
 
1820
#define I915_GEM_USERPTR_MAX_WORKERS 15
-
 
1821
 
-
 
1822
			struct mm_struct *mm;
-
 
1823
			struct i915_mmu_object *mn;
-
 
1824
			struct work_struct *work;
-
 
1825
		} userptr;
1708
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
1826
	};
Line -... Line 1827...
-
 
1827
};
-
 
1828
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
-
 
1829
 
-
 
1830
void i915_gem_track_fb(struct drm_i915_gem_object *old,
1709
 
1831
		       struct drm_i915_gem_object *new,
1710
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1832
		       unsigned frontbuffer_bits);
1711
 
1833
 
1712
/**
1834
/**
1713
 * Request queue structure.
1835
 * Request queue structure.
Line 1719... Line 1841...
1719
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
1841
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
1720
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
1842
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
1721
 */
1843
 */
1722
struct drm_i915_gem_request {
1844
struct drm_i915_gem_request {
1723
	/** On which ring this request was generated */
1845
	/** On which ring this request was generated */
1724
	struct intel_ring_buffer *ring;
1846
	struct intel_engine_cs *ring;
Line 1725... Line 1847...
1725
 
1847
 
1726
	/** GEM sequence number associated with this request. */
1848
	/** GEM sequence number associated with this request. */
Line 1727... Line 1849...
1727
	uint32_t seqno;
1849
	uint32_t seqno;
Line 1731... Line 1853...
1731
 
1853
 
1732
	/** Position in the ringbuffer of the end of the request */
1854
	/** Position in the ringbuffer of the end of the request */
Line 1733... Line 1855...
1733
	u32 tail;
1855
	u32 tail;
1734
 
1856
 
Line 1735... Line 1857...
1735
	/** Context related to this request */
1857
	/** Context related to this request */
1736
	struct i915_hw_context *ctx;
1858
	struct intel_context *ctx;
Line 1737... Line 1859...
1737
 
1859
 
Line 1749... Line 1871...
1749
	struct list_head client_list;
1871
	struct list_head client_list;
1750
};
1872
};
Line 1751... Line 1873...
1751
 
1873
 
1752
struct drm_i915_file_private {
1874
struct drm_i915_file_private {
-
 
1875
	struct drm_i915_private *dev_priv;
Line 1753... Line 1876...
1753
	struct drm_i915_private *dev_priv;
1876
	struct drm_file *file;
1754
 
1877
 
1755
	struct {
1878
	struct {
1756
		spinlock_t lock;
1879
		spinlock_t lock;
1757
		struct list_head request_list;
1880
		struct list_head request_list;
1758
		struct delayed_work idle_work;
1881
		struct delayed_work idle_work;
Line 1759... Line -...
1759
	} mm;
-
 
1760
	struct idr context_idr;
1882
	} mm;
-
 
1883
	struct idr context_idr;
-
 
1884
 
-
 
1885
	atomic_t rps_wait_boost;
-
 
1886
	struct intel_engine_cs *bsd_ring;
-
 
1887
};
-
 
1888
 
-
 
1889
/*
-
 
1890
 * A command that requires special handling by the command parser.
-
 
1891
 */
-
 
1892
struct drm_i915_cmd_descriptor {
-
 
1893
	/*
-
 
1894
	 * Flags describing how the command parser processes the command.
-
 
1895
	 *
-
 
1896
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
-
 
1897
	 *                 a length mask if not set
-
 
1898
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
-
 
1899
	 *                standard length encoding for the opcode range in
-
 
1900
	 *                which it falls
-
 
1901
	 * CMD_DESC_REJECT: The command is never allowed
-
 
1902
	 * CMD_DESC_REGISTER: The command should be checked against the
-
 
1903
	 *                    register whitelist for the appropriate ring
-
 
1904
	 * CMD_DESC_MASTER: The command is allowed if the submitting process
-
 
1905
	 *                  is the DRM master
-
 
1906
	 */
-
 
1907
	u32 flags;
-
 
1908
#define CMD_DESC_FIXED    (1<<0)
-
 
1909
#define CMD_DESC_SKIP     (1<<1)
-
 
1910
#define CMD_DESC_REJECT   (1<<2)
-
 
1911
#define CMD_DESC_REGISTER (1<<3)
-
 
1912
#define CMD_DESC_BITMASK  (1<<4)
-
 
1913
#define CMD_DESC_MASTER   (1<<5)
-
 
1914
 
-
 
1915
	/*
-
 
1916
	 * The command's unique identification bits and the bitmask to get them.
-
 
1917
	 * This isn't strictly the opcode field as defined in the spec and may
-
 
1918
	 * also include type, subtype, and/or subop fields.
-
 
1919
	 */
-
 
1920
	struct {
-
 
1921
		u32 value;
-
 
1922
		u32 mask;
-
 
1923
	} cmd;
-
 
1924
 
-
 
1925
	/*
-
 
1926
	 * The command's length. The command is either fixed length (i.e. does
-
 
1927
	 * not include a length field) or has a length field mask. The flag
-
 
1928
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
-
 
1929
	 * a length mask. All command entries in a command table must include
-
 
1930
	 * length information.
-
 
1931
	 */
-
 
1932
	union {
-
 
1933
		u32 fixed;
-
 
1934
		u32 mask;
-
 
1935
	} length;
-
 
1936
 
-
 
1937
	/*
-
 
1938
	 * Describes where to find a register address in the command to check
-
 
1939
	 * against the ring's register whitelist. Only valid if flags has the
-
 
1940
	 * CMD_DESC_REGISTER bit set.
-
 
1941
	 */
-
 
1942
	struct {
-
 
1943
		u32 offset;
-
 
1944
		u32 mask;
-
 
1945
	} reg;
-
 
1946
 
-
 
1947
#define MAX_CMD_DESC_BITMASKS 3
-
 
1948
	/*
-
 
1949
	 * Describes command checks where a particular dword is masked and
-
 
1950
	 * compared against an expected value. If the command does not match
-
 
1951
	 * the expected value, the parser rejects it. Only valid if flags has
-
 
1952
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
-
 
1953
	 * are valid.
-
 
1954
	 *
-
 
1955
	 * If the check specifies a non-zero condition_mask then the parser
-
 
1956
	 * only performs the check when the bits specified by condition_mask
-
 
1957
	 * are non-zero.
-
 
1958
	 */
-
 
1959
	struct {
-
 
1960
		u32 offset;
-
 
1961
		u32 mask;
-
 
1962
		u32 expected;
-
 
1963
		u32 condition_offset;
-
 
1964
		u32 condition_mask;
-
 
1965
	} bits[MAX_CMD_DESC_BITMASKS];
-
 
1966
};
-
 
1967
 
-
 
1968
/*
-
 
1969
 * A table of commands requiring special handling by the command parser.
-
 
1970
 *
-
 
1971
 * Each ring has an array of tables. Each table consists of an array of command
-
 
1972
 * descriptors, which must be sorted with command opcodes in ascending order.
-
 
1973
 */
1761
 
1974
struct drm_i915_cmd_table {
Line 1762... Line 1975...
1762
	struct i915_ctx_hang_stats hang_stats;
1975
	const struct drm_i915_cmd_descriptor *table;
Line 1763... Line 1976...
1763
	atomic_t rps_wait_boost;
1976
	int count;
1764
};
1977
};
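To make the relationship between the two structures concrete, here is a hedged sketch of a one-entry table; the opcode value and mask are invented for illustration and do not come from any real command set.

static const struct drm_i915_cmd_descriptor sketch_cmds[] = {
	{
		/* Fixed length (2 dwords), only valid from the DRM master. */
		.flags = CMD_DESC_FIXED | CMD_DESC_MASTER,
		.cmd = { .value = 0x12 << 23, .mask = 0xff << 23 },
		.length = { .fixed = 2 },
	},
};

static const struct drm_i915_cmd_table sketch_cmd_table = {
	.table = sketch_cmds,
	.count = ARRAY_SIZE(sketch_cmds),
};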
1765
 
1978
 
1766
#define INTEL_INFO(dev)	(to_i915(dev)->info)
1979
#define INTEL_INFO(dev)	(&to_i915(dev)->info)
Line 1788... Line 2001...
1788
				 (dev)->pdev->device == 0x015a)
2001
				 (dev)->pdev->device == 0x015a)
1789
#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
2002
#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
1790
				 (dev)->pdev->device == 0x0106 || \
2003
				 (dev)->pdev->device == 0x0106 || \
1791
				 (dev)->pdev->device == 0x010A)
2004
				 (dev)->pdev->device == 0x010A)
1792
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
2005
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
-
 
2006
#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
1793
#define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
2007
#define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
1794
#define IS_BROADWELL(dev)	(INTEL_INFO(dev)->gen == 8)
2008
#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
1795
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
2009
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
1796
#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
2010
#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
1797
				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
2011
				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
1798
#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
2012
#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
1799
				 (((dev)->pdev->device & 0xf) == 0x2  || \
2013
				 (((dev)->pdev->device & 0xf) == 0x2  || \
Line 1802... Line 2016...
1802
#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
2016
#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
1803
				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
2017
				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
1804
#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
2018
#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
1805
#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
2019
#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
1806
				 ((dev)->pdev->device & 0x00F0) == 0x0020)
2020
				 ((dev)->pdev->device & 0x00F0) == 0x0020)
-
 
2021
/* ULX machines are also considered ULT. */
-
 
2022
#define IS_HSW_ULX(dev)		((dev)->pdev->device == 0x0A0E || \
-
 
2023
				 (dev)->pdev->device == 0x0A1E)
1807
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
2024
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
Line 1808... Line 2025...
1808
 
2025
 
1809
/*
2026
/*
1810
 * The genX designation typically refers to the render engine, so render
2027
 * The genX designation typically refers to the render engine, so render
Line 1822... Line 2039...
1822
 
2039
 
1823
#define RENDER_RING		(1<<RCS)
2040
#define RENDER_RING		(1<<RCS)
1824
#define BSD_RING		(1<<VCS)
2041
#define BSD_RING		(1<<VCS)
1825
#define BLT_RING		(1<<BCS)
2042
#define BLT_RING		(1<<BCS)
-
 
2043
#define VEBOX_RING		(1<<VECS)
1826
#define VEBOX_RING		(1<<VECS)
2044
#define BSD2_RING		(1<<VCS2)
-
 
2045
#define HAS_BSD(dev)            (INTEL_INFO(dev)->ring_mask & BSD_RING)
1827
#define HAS_BSD(dev)            (INTEL_INFO(dev)->ring_mask & BSD_RING)
2046
#define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
1828
#define HAS_BLT(dev)            (INTEL_INFO(dev)->ring_mask & BLT_RING)
2047
#define HAS_BLT(dev)            (INTEL_INFO(dev)->ring_mask & BLT_RING)
1829
#define HAS_VEBOX(dev)            (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
2048
#define HAS_VEBOX(dev)            (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
1830
#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
2049
#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
-
 
2050
#define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
1831
#define HAS_WT(dev)            (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
2051
				 to_i915(dev)->ellc_size)
Line 1832... Line 2052...
1832
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
2052
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
-
 
2053
 
1833
 
2054
#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
-
 
2055
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6)
-
 
2056
#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
Line 1834... Line 2057...
1834
#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
2057
#define USES_PPGTT(dev)		intel_enable_ppgtt(dev, false)
1835
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
2058
#define USES_FULL_PPGTT(dev)	intel_enable_ppgtt(dev, true)
Line 1836... Line 2059...
1836
 
2059
 
1837
#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
2060
#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
-
 
2061
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
-
 
2062
 
-
 
2063
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-
 
2064
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
-
 
2065
/*
-
 
2066
 * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
-
 
2067
 * even when in MSI mode. This results in spurious interrupt warnings if the
-
 
2068
 * legacy irq no. is shared with another device. The kernel then disables that
Line 1838... Line 2069...
1838
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
2069
 * interrupt source and so prevents the other device from working properly.
1839
 
2070
 */
1840
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
2071
#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
1841
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
2072
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
Line 1858... Line 2089...
1858
#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))
2089
#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))
Line 1859... Line 2090...
1859
 
2090
 
1860
#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
2091
#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
1861
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
2092
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
1862
#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
2093
#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
1863
#define HAS_PC8(dev)		(IS_HASWELL(dev)) /* XXX HSW:ULX */
2094
#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
Line 1864... Line 2095...
1864
#define HAS_RUNTIME_PM(dev)	(IS_HASWELL(dev))
2095
				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
1865
 
2096
 
1866
#define INTEL_PCH_DEVICE_ID_MASK		0xff00
2097
#define INTEL_PCH_DEVICE_ID_MASK		0xff00
1867
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
2098
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
Line 1875... Line 2106...
1875
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
2106
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1876
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
2107
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1877
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
2108
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
1878
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
2109
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
Line -... Line 2110...
-
 
2110
 
-
 
2111
#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
1879
 
2112
 
1880
/* DPF == dynamic parity feature */
2113
/* DPF == dynamic parity feature */
1881
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
2114
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
Line 1882... Line 2115...
1882
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
2115
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
Line 1883... Line 2116...
1883
 
2116
 
Line 1884... Line -...
1884
#define GT_FREQUENCY_MULTIPLIER 50
-
 
1885
 
-
 
1886
#include "i915_trace.h"
-
 
1887
 
-
 
1888
extern unsigned int i915_fbpercrtc      __always_unused;
-
 
1889
extern int i915_panel_ignore_lid        __read_mostly;
-
 
1890
extern unsigned int i915_powersave      __read_mostly;
-
 
1891
extern int i915_semaphores              __read_mostly;
-
 
1892
extern unsigned int i915_lvds_downclock __read_mostly;
-
 
1893
extern int i915_lvds_channel_mode       __read_mostly;
-
 
1894
extern int i915_panel_use_ssc           __read_mostly;
-
 
1895
extern int i915_vbt_sdvo_panel_type     __read_mostly;
-
 
1896
extern int i915_enable_rc6              __read_mostly;
-
 
1897
extern int i915_enable_fbc              __read_mostly;
-
 
1898
extern bool i915_enable_hangcheck       __read_mostly;
-
 
1899
extern int i915_enable_ppgtt            __read_mostly;
-
 
1900
extern int i915_enable_psr __read_mostly;
-
 
1901
extern unsigned int i915_preliminary_hw_support __read_mostly;
-
 
1902
extern int i915_disable_power_well __read_mostly;
-
 
1903
extern int i915_enable_ips __read_mostly;
-
 
Line 1904... Line 2117...
1904
extern bool i915_fastboot __read_mostly;
2117
#define GT_FREQUENCY_MULTIPLIER 50
1905
extern int i915_enable_pc8 __read_mostly;
2118
 
Line -... Line 2119...
-
 
2119
#include "i915_trace.h"
-
 
2120
 
-
 
2121
 
-
 
2122
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
-
 
2123
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
-
 
2124
 
-
 
2125
/* i915_params.c */
-
 
2126
struct i915_params {
-
 
2127
	int modeset;
-
 
2128
	int panel_ignore_lid;
-
 
2129
	unsigned int powersave;
-
 
2130
	int semaphores;
-
 
2131
	unsigned int lvds_downclock;
-
 
2132
	int lvds_channel_mode;
-
 
2133
	int panel_use_ssc;
-
 
2134
	int vbt_sdvo_panel_type;
-
 
2135
	int enable_rc6;
-
 
2136
	int enable_fbc;
-
 
2137
	int enable_ppgtt;
-
 
2138
	int enable_psr;
-
 
2139
	unsigned int preliminary_hw_support;
-
 
2140
	int disable_power_well;
-
 
2141
	int enable_ips;
-
 
2142
	int invert_brightness;
-
 
2143
	int enable_cmd_parser;
-
 
2144
	/* leave bools at the end to not create holes */
-
 
2145
	bool enable_hangcheck;
-
 
2146
	bool fastboot;
-
 
2147
	bool prefault_disable;
-
 
2148
	bool reset;
-
 
2149
	bool disable_display;
1906
extern int i915_pc8_timeout __read_mostly;
2150
	bool disable_vtd_wa;
1907
extern bool i915_prefault_disable __read_mostly;
2151
	int use_mmio_flip;
1908
 
2152
	bool mmio_debug;
1909
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
2153
};
1910
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
2154
extern struct i915_params i915 __read_mostly;
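The i915_params block replaces the long list of per-option externs on the old side; a sketch of the resulting usage pattern (using a declaration that appears later in this header):

	/* Sketch: gate work on a module parameter via the shared block. */
	if (i915.enable_hangcheck)
		i915_queue_hangcheck(dev);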
1911
 
2155
 
1912
				/* i915_dma.c */
2156
				/* i915_dma.c */
1913
void i915_update_dri1_breadcrumb(struct drm_device *dev);
2157
void i915_update_dri1_breadcrumb(struct drm_device *dev);
1914
extern void i915_kernel_lost_context(struct drm_device *dev);
2158
extern void i915_kernel_lost_context(struct drm_device *dev);
1915
extern int i915_driver_load(struct drm_device *, unsigned long flags);
2159
extern int i915_driver_load(struct drm_device *, unsigned long flags);
1916
extern int i915_driver_unload(struct drm_device *);
2160
extern int i915_driver_unload(struct drm_device *);
1917
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
2161
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
1918
extern void i915_driver_lastclose(struct drm_device *dev);
2162
extern void i915_driver_lastclose(struct drm_device *dev);
1919
extern void i915_driver_preclose(struct drm_device *dev,
2163
extern void i915_driver_preclose(struct drm_device *dev,
1920
				 struct drm_file *file_priv);
2164
				 struct drm_file *file);
1921
extern void i915_driver_postclose(struct drm_device *dev,
2165
extern void i915_driver_postclose(struct drm_device *dev,
Line 1932... Line 2176...
1932
extern int i915_reset(struct drm_device *dev);
2176
extern int i915_reset(struct drm_device *dev);
1933
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2177
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1934
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2178
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1935
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2179
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1936
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2180
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-
 
2181
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-
 
2182
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
Line 1937... Line 2183...
1937
 
2183
 
Line 1938... Line 2184...
1938
extern void intel_console_resume(struct work_struct *work);
2184
extern void intel_console_resume(struct work_struct *work);
1939
 
2185
 
-
 
2186
/* i915_irq.c */
1940
/* i915_irq.c */
2187
void i915_queue_hangcheck(struct drm_device *dev);
-
 
2188
__printf(3, 4)
Line -... Line 2189...
-
 
2189
void i915_handle_error(struct drm_device *dev, bool wedged,
-
 
2190
		       const char *fmt, ...);
1941
void i915_queue_hangcheck(struct drm_device *dev);
2191
 
1942
void i915_handle_error(struct drm_device *dev, bool wedged);
2192
void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
Line 1943... Line 2193...
1943
 
2193
							int new_delay);
1944
extern void intel_irq_init(struct drm_device *dev);
2194
extern void intel_irq_init(struct drm_device *dev);
-
 
2195
extern void intel_hpd_init(struct drm_device *dev);
1945
extern void intel_hpd_init(struct drm_device *dev);
2196
 
1946
 
2197
extern void intel_uncore_sanitize(struct drm_device *dev);
1947
extern void intel_uncore_sanitize(struct drm_device *dev);
2198
extern void intel_uncore_early_sanitize(struct drm_device *dev,
-
 
2199
					bool restore_forcewake);
Line 1948... Line 2200...
1948
extern void intel_uncore_early_sanitize(struct drm_device *dev);
2200
extern void intel_uncore_init(struct drm_device *dev);
1949
extern void intel_uncore_init(struct drm_device *dev);
2201
extern void intel_uncore_check_errors(struct drm_device *dev);
-
 
2202
extern void intel_uncore_fini(struct drm_device *dev);
Line 1950... Line 2203...
1950
extern void intel_uncore_check_errors(struct drm_device *dev);
2203
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
1951
extern void intel_uncore_fini(struct drm_device *dev);
2204
 
-
 
2205
void
-
 
2206
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
-
 
2207
		     u32 status_mask);
-
 
2208
 
Line 1952... Line 2209...
1952
 
2209
void
1953
void
2210
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
1954
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
2211
		      u32 status_mask);
1955
 
2212
 
Line 1997... Line 2254...
1997
			   struct drm_file *file_priv);
2254
			   struct drm_file *file_priv);
1998
int i915_gem_set_tiling(struct drm_device *dev, void *data,
2255
int i915_gem_set_tiling(struct drm_device *dev, void *data,
1999
			struct drm_file *file_priv);
2256
			struct drm_file *file_priv);
2000
int i915_gem_get_tiling(struct drm_device *dev, void *data,
2257
int i915_gem_get_tiling(struct drm_device *dev, void *data,
2001
			struct drm_file *file_priv);
2258
			struct drm_file *file_priv);
-
 
2259
int i915_gem_init_userptr(struct drm_device *dev);
-
 
2260
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
-
 
2261
			   struct drm_file *file);
2002
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
2262
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
2003
				struct drm_file *file_priv);
2263
				struct drm_file *file_priv);
2004
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2264
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2005
			struct drm_file *file_priv);
2265
			struct drm_file *file_priv);
2006
void i915_gem_load(struct drm_device *dev);
2266
void i915_gem_load(struct drm_device *dev);
Line 2008... Line 2268...
2008
void i915_gem_object_free(struct drm_i915_gem_object *obj);
2268
void i915_gem_object_free(struct drm_i915_gem_object *obj);
2009
void i915_gem_object_init(struct drm_i915_gem_object *obj,
2269
void i915_gem_object_init(struct drm_i915_gem_object *obj,
2010
			 const struct drm_i915_gem_object_ops *ops);
2270
			 const struct drm_i915_gem_object_ops *ops);
2011
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
2271
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
2012
						  size_t size);
2272
						  size_t size);
-
 
2273
void i915_init_vm(struct drm_i915_private *dev_priv,
-
 
2274
		  struct i915_address_space *vm);
2013
void i915_gem_free_object(struct drm_gem_object *obj);
2275
void i915_gem_free_object(struct drm_gem_object *obj);
2014
void i915_gem_vma_destroy(struct i915_vma *vma);
2276
void i915_gem_vma_destroy(struct i915_vma *vma);
Line -... Line 2277...
-
 
2277
 
-
 
2278
#define PIN_MAPPABLE 0x1
-
 
2279
#define PIN_NONBLOCK 0x2
-
 
2280
#define PIN_GLOBAL 0x4
-
 
2281
#define PIN_OFFSET_BIAS 0x8
2015
 
2282
#define PIN_OFFSET_MASK (~4095)
2016
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
2283
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
2017
				     struct i915_address_space *vm,
2284
				     struct i915_address_space *vm,
2018
				     uint32_t alignment,
-
 
2019
				     bool map_and_fenceable,
2285
				     uint32_t alignment,
2020
				     bool nonblocking);
-
 
2021
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
2286
				     uint64_t flags);
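A sketch of the new flags-based pinning interface (the alignment value is illustrative; obj_to_ggtt() is defined further down in this header):

	/* Sketch: bind into the global GTT, CPU-mappable, without evicting
	 * other objects to make room. */
	ret = i915_gem_object_pin(obj, obj_to_ggtt(obj), 4096,
				  PIN_GLOBAL | PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		return ret;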
2022
int __must_check i915_vma_unbind(struct i915_vma *vma);
-
 
2023
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
2287
int __must_check i915_vma_unbind(struct i915_vma *vma);
2024
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2288
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2025
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
2289
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
2026
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
2290
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
Line -... Line 2291...
-
 
2291
void i915_gem_lastclose(struct drm_device *dev);
-
 
2292
 
-
 
2293
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
2027
void i915_gem_lastclose(struct drm_device *dev);
2294
				    int *needs_clflush);
2028
 
2295
 
2029
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
2296
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
2030
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
2297
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
Line 2047... Line 2314...
2047
	obj->pages_pin_count--;
2314
	obj->pages_pin_count--;
2048
}
2315
}
Line 2049... Line 2316...
2049
 
2316
 
2050
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
2317
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
2051
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
2318
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
2052
			 struct intel_ring_buffer *to);
2319
			 struct intel_engine_cs *to);
2053
void i915_vma_move_to_active(struct i915_vma *vma,
2320
void i915_vma_move_to_active(struct i915_vma *vma,
2054
				    struct intel_ring_buffer *ring);
2321
			     struct intel_engine_cs *ring);
2055
int i915_gem_dumb_create(struct drm_file *file_priv,
2322
int i915_gem_dumb_create(struct drm_file *file_priv,
2056
			 struct drm_device *dev,
2323
			 struct drm_device *dev,
2057
			 struct drm_mode_create_dumb *args);
2324
			 struct drm_mode_create_dumb *args);
2058
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
2325
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
Line 2069... Line 2336...
2069
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
2336
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
2070
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
2337
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
2071
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
2338
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
2072
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
2339
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
Line 2073... Line -...
2073
 
-
 
2074
static inline bool
2340
 
2075
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-
 
2076
{
2341
bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
2077
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-
 
2078
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
 
2079
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-
 
2080
		return true;
-
 
2081
	} else
-
 
2082
		return false;
-
 
Line 2083... Line 2342...
2083
}
2342
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
2084
 
2343
 
2085
static inline void
-
 
2086
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-
 
2087
{
-
 
2088
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-
 
2089
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
 
2090
		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-
 
2091
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
-
 
Line 2092... Line 2344...
2092
	}
2344
struct drm_i915_gem_request *
2093
}
2345
i915_gem_find_active_request(struct intel_engine_cs *ring);
2094
 
2346
 
2095
bool i915_gem_retire_requests(struct drm_device *dev);
2347
bool i915_gem_retire_requests(struct drm_device *dev);
-
 
2348
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
-
 
2349
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
2096
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
2350
				      bool interruptible);
2097
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
2351
int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
2098
				      bool interruptible);
2352
 
2099
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
2353
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
2100
{
2354
{
Line 2110... Line 2364...
2110
static inline u32 i915_reset_count(struct i915_gpu_error *error)
2364
static inline u32 i915_reset_count(struct i915_gpu_error *error)
2111
{
2365
{
2112
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
2366
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
2113
}
2367
}
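The counter is incremented once when a reset starts and once more when it completes, which is why the helper divides by two. A sketch of the intended use:

	/* Sketch: detect whether a full GPU reset completed in a window. */
	u32 pre = i915_reset_count(&dev_priv->gpu_error);
	/* ... submit work, wait for it ... */
	if (i915_reset_count(&dev_priv->gpu_error) != pre)
		DRM_DEBUG("GPU reset occurred while waiting\n");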
Line -... Line 2368...
-
 
2368
 
-
 
2369
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
-
 
2370
{
-
 
2371
	return dev_priv->gpu_error.stop_rings == 0 ||
-
 
2372
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
-
 
2373
}
-
 
2374
 
-
 
2375
static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
-
 
2376
{
-
 
2377
	return dev_priv->gpu_error.stop_rings == 0 ||
-
 
2378
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
-
 
2379
}
2114
 
2380
 
2115
void i915_gem_reset(struct drm_device *dev);
2381
void i915_gem_reset(struct drm_device *dev);
2116
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
2382
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
2117
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
2383
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
2118
int __must_check i915_gem_init(struct drm_device *dev);
2384
int __must_check i915_gem_init(struct drm_device *dev);
2119
int __must_check i915_gem_init_hw(struct drm_device *dev);
2385
int __must_check i915_gem_init_hw(struct drm_device *dev);
2120
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
2386
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
2121
void i915_gem_init_swizzling(struct drm_device *dev);
2387
void i915_gem_init_swizzling(struct drm_device *dev);
2122
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
2388
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
2123
int __must_check i915_gpu_idle(struct drm_device *dev);
2389
int __must_check i915_gpu_idle(struct drm_device *dev);
2124
int __must_check i915_gem_suspend(struct drm_device *dev);
2390
int __must_check i915_gem_suspend(struct drm_device *dev);
2125
int __i915_add_request(struct intel_ring_buffer *ring,
2391
int __i915_add_request(struct intel_engine_cs *ring,
2126
				  struct drm_file *file,
2392
				  struct drm_file *file,
2127
		       struct drm_i915_gem_object *batch_obj,
2393
		       struct drm_i915_gem_object *batch_obj,
2128
		     u32 *seqno);
2394
		     u32 *seqno);
2129
#define i915_add_request(ring, seqno) \
2395
#define i915_add_request(ring, seqno) \
2130
	__i915_add_request(ring, NULL, NULL, seqno)
2396
	__i915_add_request(ring, NULL, NULL, seqno)
2131
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
2397
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
2132
				   uint32_t seqno);
2398
				   uint32_t seqno);
2133
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
2399
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
2134
int __must_check
2400
int __must_check
2135
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
2401
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
2136
				  bool write);
2402
				  bool write);
2137
int __must_check
2403
int __must_check
2138
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
2404
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
2139
int __must_check
2405
int __must_check
2140
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2406
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2141
				     u32 alignment,
2407
				     u32 alignment,
2142
				     struct intel_ring_buffer *pipelined);
2408
				     struct intel_engine_cs *pipelined);
2143
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
2409
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
2144
int i915_gem_attach_phys_object(struct drm_device *dev,
-
 
2145
				struct drm_i915_gem_object *obj,
-
 
2146
				int id,
2410
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
2147
				int align);
-
 
2148
void i915_gem_detach_phys_object(struct drm_device *dev,
-
 
2149
				 struct drm_i915_gem_object *obj);
-
 
2150
void i915_gem_free_all_phys_object(struct drm_device *dev);
2411
				int align);
2151
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
2412
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
Line 2152... Line 2413...
2152
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2413
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2153
 
2414
 
Line 2180... Line 2441...
2180
struct i915_vma *
2441
struct i915_vma *
2181
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2442
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2182
				  struct i915_address_space *vm);
2443
				  struct i915_address_space *vm);
Line 2183... Line 2444...
2183
 
2444
 
-
 
2445
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
-
 
2446
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
-
 
2447
	struct i915_vma *vma;
-
 
2448
	list_for_each_entry(vma, &obj->vma_list, vma_link)
-
 
2449
		if (vma->pin_count > 0)
-
 
2450
			return true;
-
 
2451
	return false;
Line 2184... Line 2452...
2184
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
2452
}
2185
 
2453
 
2186
/* Some GGTT VM helpers */
2454
/* Some GGTT VM helpers */
2187
#define obj_to_ggtt(obj) \
2455
#define obj_to_ggtt(obj) \
Line 2211... Line 2479...
2211
}
2479
}
Line 2212... Line 2480...
2212
 
2480
 
2213
static inline int __must_check
2481
static inline int __must_check
2214
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2482
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2215
		      uint32_t alignment,
2483
		      uint32_t alignment,
-
 
2484
		      unsigned flags)
-
 
2485
{
-
 
2486
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
-
 
2487
}
2216
		      bool map_and_fenceable,
2488
 
-
 
2489
static inline int
2217
		      bool nonblocking)
2490
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2218
{
2491
{
2219
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
-
 
2220
				   map_and_fenceable, nonblocking);
2492
	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
Line -... Line 2493...
-
 
2493
}
-
 
2494
 
2221
}
2495
void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
-
 
2496
 
2222
 
2497
/* i915_gem_context.c */
2223
/* i915_gem_context.c */
2498
#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
-
 
2499
int __must_check i915_gem_context_init(struct drm_device *dev);
-
 
2500
void i915_gem_context_fini(struct drm_device *dev);
-
 
2501
void i915_gem_context_reset(struct drm_device *dev);
2224
int __must_check i915_gem_context_init(struct drm_device *dev);
2502
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
2225
void i915_gem_context_fini(struct drm_device *dev);
2503
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
2226
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
2504
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-
 
2505
int i915_switch_context(struct intel_engine_cs *ring,
-
 
2506
			struct intel_context *to);
2227
int i915_switch_context(struct intel_ring_buffer *ring,
2507
struct intel_context *
2228
			struct drm_file *file, int to_id);
2508
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
2229
void i915_gem_context_free(struct kref *ctx_ref);
2509
void i915_gem_context_free(struct kref *ctx_ref);
2230
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
2510
static inline void i915_gem_context_reference(struct intel_context *ctx)
2231
{
2511
{
Line 2232... Line 2512...
2232
	kref_get(&ctx->ref);
2512
	kref_get(&ctx->ref);
2233
}
2513
}
2234
 
2514
 
2235
static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
2515
static inline void i915_gem_context_unreference(struct intel_context *ctx)
Line 2236... Line 2516...
2236
{
2516
{
-
 
2517
	kref_put(&ctx->ref, i915_gem_context_free);
2237
	kref_put(&ctx->ref, i915_gem_context_free);
2518
}
2238
}
-
 
2239
 
2519
 
-
 
2520
static inline bool i915_gem_context_is_default(const struct intel_context *c)
2240
struct i915_ctx_hang_stats * __must_check
2521
{
2241
i915_gem_context_get_hang_stats(struct drm_device *dev,
2522
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
2242
				struct drm_file *file,
2523
}
2243
				u32 id);
2524
 
Line 2244... Line 2525...
2244
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2525
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2245
				  struct drm_file *file);
-
 
2246
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
-
 
2247
				   struct drm_file *file);
-
 
2248
 
-
 
2249
/* i915_gem_gtt.c */
-
 
2250
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-
 
2251
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-
 
2252
			    struct drm_i915_gem_object *obj,
-
 
2253
			    enum i915_cache_level cache_level);
-
 
2254
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-
 
2255
			      struct drm_i915_gem_object *obj);
-
 
2256
 
-
 
2257
void i915_check_and_clear_faults(struct drm_device *dev);
-
 
2258
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-
 
2259
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-
 
2260
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-
 
2261
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-
 
2262
				enum i915_cache_level cache_level);
-
 
2263
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
2526
				  struct drm_file *file);
2264
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-
 
2265
void i915_gem_init_global_gtt(struct drm_device *dev);
-
 
2266
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
-
 
2267
			       unsigned long mappable_end, unsigned long end);
-
 
2268
int i915_gem_gtt_init(struct drm_device *dev);
-
 
2269
static inline void i915_gem_chipset_flush(struct drm_device *dev)
-
 
2270
{
-
 
2271
	if (INTEL_INFO(dev)->gen < 6)
2527
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2272
		intel_gtt_chipset_flush();
2528
				   struct drm_file *file);
2273
}
2529
 
2274
 
2530
/* i915_gem_render_state.c */
2275
 
2531
int i915_gem_render_state_init(struct intel_engine_cs *ring);
2276
/* i915_gem_evict.c */
2532
/* i915_gem_evict.c */
-
 
2533
int __must_check i915_gem_evict_something(struct drm_device *dev,
2277
int __must_check i915_gem_evict_something(struct drm_device *dev,
2534
					  struct i915_address_space *vm,
2278
					  struct i915_address_space *vm,
2535
					  int min_size,
2279
					  int min_size,
2536
					  unsigned alignment,
2280
					  unsigned alignment,
2537
					  unsigned cache_level,
Line -... Line 2538...
-
 
2538
					  unsigned long start,
-
 
2539
					  unsigned long end,
-
 
2540
					  unsigned flags);
-
 
2541
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
-
 
2542
int i915_gem_evict_everything(struct drm_device *dev);
-
 
2543
 
-
 
2544
/* belongs in i915_gem_gtt.h */
2281
					  unsigned cache_level,
2545
static inline void i915_gem_chipset_flush(struct drm_device *dev)
2282
					  bool mappable,
2546
{
2283
					  bool nonblock);
2547
	if (INTEL_INFO(dev)->gen < 6)
2284
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2548
		intel_gtt_chipset_flush();
2285
int i915_gem_evict_everything(struct drm_device *dev);
2549
}
2286
 
2550
 
2287
/* i915_gem_stolen.c */
2551
/* i915_gem_stolen.c */
2288
int i915_gem_init_stolen(struct drm_device *dev);
2552
int i915_gem_init_stolen(struct drm_device *dev);
2289
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
2553
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
2290
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
2554
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
2291
void i915_gem_cleanup_stolen(struct drm_device *dev);
2555
void i915_gem_cleanup_stolen(struct drm_device *dev);
2292
struct drm_i915_gem_object *
2556
struct drm_i915_gem_object *
2293
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
-
 
Line 2294... Line 2557...
2294
struct drm_i915_gem_object *
2557
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
2295
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
2558
struct drm_i915_gem_object *
2296
					       u32 stolen_offset,
2559
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
2297
					       u32 gtt_offset,
2560
					       u32 stolen_offset,
Line 2298... Line 2561...
2298
					       u32 size);
2561
					       u32 gtt_offset,
2299
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
2562
					       u32 size);
2300
 
2563
 
Line 2337... Line 2600...
2337
static inline void i915_error_state_buf_release(
2600
static inline void i915_error_state_buf_release(
2338
	struct drm_i915_error_state_buf *eb)
2601
	struct drm_i915_error_state_buf *eb)
2339
{
2602
{
2340
	kfree(eb->buf);
2603
	kfree(eb->buf);
2341
}
2604
}
2342
void i915_capture_error_state(struct drm_device *dev);
2605
void i915_capture_error_state(struct drm_device *dev, bool wedge,
-
 
2606
			      const char *error_msg);
2343
void i915_error_state_get(struct drm_device *dev,
2607
void i915_error_state_get(struct drm_device *dev,
2344
			  struct i915_error_state_file_priv *error_priv);
2608
			  struct i915_error_state_file_priv *error_priv);
2345
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2609
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2346
void i915_destroy_error_state(struct drm_device *dev);
2610
void i915_destroy_error_state(struct drm_device *dev);
Line 2347... Line 2611...
2347
 
2611
 
2348
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2612
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
Line -... Line 2613...
-
 
2613
const char *i915_cache_level_str(int type);
-
 
2614
 
-
 
2615
/* i915_cmd_parser.c */
-
 
2616
int i915_cmd_parser_get_version(void);
-
 
2617
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
-
 
2618
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
-
 
2619
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-
 
2620
int i915_parse_cmds(struct intel_engine_cs *ring,
-
 
2621
		    struct drm_i915_gem_object *batch_obj,
-
 
2622
		    u32 batch_start_offset,
2349
const char *i915_cache_level_str(int type);
2623
		    bool is_master);
2350
 
2624
 
2351
/* i915_suspend.c */
2625
/* i915_suspend.c */
Line 2352... Line 2626...
2352
extern int i915_save_state(struct drm_device *dev);
2626
extern int i915_save_state(struct drm_device *dev);
Line 2419... Line 2693...
2419
extern void intel_modeset_init_hw(struct drm_device *dev);
2693
extern void intel_modeset_init_hw(struct drm_device *dev);
2420
extern void intel_modeset_suspend_hw(struct drm_device *dev);
2694
extern void intel_modeset_suspend_hw(struct drm_device *dev);
2421
extern void intel_modeset_init(struct drm_device *dev);
2695
extern void intel_modeset_init(struct drm_device *dev);
2422
extern void intel_modeset_gem_init(struct drm_device *dev);
2696
extern void intel_modeset_gem_init(struct drm_device *dev);
2423
extern void intel_modeset_cleanup(struct drm_device *dev);
2697
extern void intel_modeset_cleanup(struct drm_device *dev);
-
 
2698
extern void intel_connector_unregister(struct intel_connector *);
2424
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
2699
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
2425
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2700
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2426
					 bool force_restore);
2701
					 bool force_restore);
2427
extern void i915_redisable_vga(struct drm_device *dev);
2702
extern void i915_redisable_vga(struct drm_device *dev);
-
 
2703
extern void i915_redisable_vga_power_on(struct drm_device *dev);
2428
extern bool intel_fbc_enabled(struct drm_device *dev);
2704
extern bool intel_fbc_enabled(struct drm_device *dev);
2429
extern void intel_disable_fbc(struct drm_device *dev);
2705
extern void intel_disable_fbc(struct drm_device *dev);
2430
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
2706
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
2431
extern void intel_init_pch_refclk(struct drm_device *dev);
2707
extern void intel_init_pch_refclk(struct drm_device *dev);
2432
extern void gen6_set_rps(struct drm_device *dev, u8 val);
2708
extern void gen6_set_rps(struct drm_device *dev, u8 val);
2433
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
2709
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
2434
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
2710
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
2435
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
2711
				  bool enable);
2436
extern void intel_detect_pch(struct drm_device *dev);
2712
extern void intel_detect_pch(struct drm_device *dev);
2437
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
2713
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
2438
extern int intel_enable_rc6(const struct drm_device *dev);
2714
extern int intel_enable_rc6(const struct drm_device *dev);
Line 2439... Line 2715...
2439
 
2715
 
2440
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
2716
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
2441
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
2717
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
2442
			struct drm_file *file);
2718
			struct drm_file *file);
2443
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
2719
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
Line -... Line 2720...
-
 
2720
			       struct drm_file *file);
-
 
2721
 
2444
			       struct drm_file *file);
2722
void intel_notify_mmio_flip(struct intel_engine_cs *ring);
2445
 
2723
 
2446
/* overlay */
2724
/* overlay */
2447
#ifdef CONFIG_DEBUG_FS
2725
#ifdef CONFIG_DEBUG_FS
2448
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
2726
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
Line 2459... Line 2737...
2459
 * must be set to prevent the GT core from powering down and stale values being
2737
 * must be set to prevent the GT core from powering down and stale values being
2460
 * returned.
2738
 * returned.
2461
 */
2739
 */
2462
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2740
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2463
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
2741
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
-
 
2742
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
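Typical usage brackets a raw register access with a get/put pair (sketch; FORCEWAKE_ALL is assumed to be the catch-all mask defined alongside FORCEWAKE_RENDER below, and 0x1234 is a placeholder offset):

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
	val = I915_READ_NOTRACE(0x1234);	/* GT stays awake for the read */
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);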
Line 2464... Line 2743...
2464
 
2743
 
2465
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
2744
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
Line 2466... Line 2745...
2466
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
2745
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
Line 2489... Line 2768...
2489
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2768
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
Line 2490... Line 2769...
2490
 
2769
 
2491
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
2770
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
Line 2492... Line -...
2492
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
-
 
2493
 
-
 
2494
void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
-
 
2495
void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
-
 
2496
 
-
 
2497
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
-
 
2498
	(((reg) >= 0x2000 && (reg) < 0x4000) ||\
-
 
2499
	((reg) >= 0x5000 && (reg) < 0x8000) ||\
-
 
2500
	((reg) >= 0xB000 && (reg) < 0x12000) ||\
-
 
2501
	((reg) >= 0x2E000 && (reg) < 0x30000))
-
 
2502
 
-
 
2503
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
-
 
2504
	(((reg) >= 0x12000 && (reg) < 0x14000) ||\
-
 
2505
	((reg) >= 0x22000 && (reg) < 0x24000) ||\
-
 
2506
	((reg) >= 0x30000 && (reg) < 0x40000))
2771
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
2507
 
2772
 
2508
#define FORCEWAKE_RENDER	(1 << 0)
2773
#define FORCEWAKE_RENDER	(1 << 0)
Line 2521... Line 2786...
2521
#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
2786
#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
2522
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
2787
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
2523
#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
2788
#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
2524
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
2789
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
Line -... Line 2790...
-
 
2790
 
-
 
2791
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
-
 
2792
 * will be implemented using 2 32-bit writes in an arbitrary order with
-
 
2793
 * an arbitrary delay between them. This can cause the hardware to
-
 
2794
 * act upon the intermediate value, possibly leading to corruption and
-
 
2795
 * machine death. You have been warned.
2525
 
2796
 */
2526
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
2797
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
Line -... Line 2798...
-
 
2798
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
-
 
2799
 
-
 
2800
#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
-
 
2801
		u32 upper = I915_READ(upper_reg);			\
-
 
2802
		u32 lower = I915_READ(lower_reg);			\
-
 
2803
		u32 tmp = I915_READ(upper_reg);				\
-
 
2804
		if (upper != tmp) {					\
-
 
2805
			upper = tmp;					\
-
 
2806
			lower = I915_READ(lower_reg);			\
-
 
2807
			WARN_ON(I915_READ(upper_reg) != upper);		\
-
 
2808
		}							\
2527
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
2809
		(u64)upper << 32 | lower; })
2528
 
2810
 
Line 2529... Line 2811...
2529
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
2811
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
2530
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
2812
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
2531
 
2813
 
2532
/* "Broadcast RGB" property */
2814
/* "Broadcast RGB" property */
Line 2533... Line 2815...
2533
#define INTEL_BROADCAST_RGB_AUTO 0
2815
#define INTEL_BROADCAST_RGB_AUTO 0
2534
#define INTEL_BROADCAST_RGB_FULL 1
2816
#define INTEL_BROADCAST_RGB_FULL 1
2535
#define INTEL_BROADCAST_RGB_LIMITED 2
-
 
2536
 
-
 
2537
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
2817
#define INTEL_BROADCAST_RGB_LIMITED 2
2538
{
2818
 
-
 
2819
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
-
 
2820
{
2539
	if (HAS_PCH_SPLIT(dev))
2821
	if (IS_VALLEYVIEW(dev))
2540
		return CPU_VGACNTRL;
2822
		return VLV_VGACNTRL;
2541
	else if (IS_VALLEYVIEW(dev))
2823
	else if (INTEL_INFO(dev)->gen >= 5)
Line 2542... Line 2824...
2542
		return VLV_VGACNTRL;
2824
		return CPU_VGACNTRL;
Line 2562... Line 2844...
2562
	unsigned long j = timespec_to_jiffies(value);
2844
	unsigned long j = timespec_to_jiffies(value);
Line 2563... Line 2845...
2563
 
2845
 
2564
	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2846
	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
Line -... Line 2847...
-
 
2847
}
-
 
2848
 
2565
}
2849
/*
-
 
2850
 * If you need to wait X milliseconds between events A and B, but event B
-
 
2851
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
-
 
2852
 * when event A happened, then just before event B you call this function and
-
 
2853
 * pass the timestamp as the first argument, and X as the second argument.
-
 
2854
 */
2566
 
2855
static inline void
-
 
2856
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
-
 
2857
{
-
 
2858
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
2567
static inline int mutex_trylock(struct mutex *lock)
2859
 
-
 
2860
	/*
-
 
2861
	 * Don't re-read the value of "jiffies" every time since it may change
2568
{
2862
	 * behind our back and break the math.
-
 
2863
	 */
-
 
2864
	tmp_jiffies = jiffies;
-
 
2865
	target_jiffies = timestamp_jiffies +
-
 
2866
			 msecs_to_jiffies_timeout(to_wait_ms);
-
 
2867
 
-
 
2868
	if (time_after(target_jiffies, tmp_jiffies)) {
2569
    if (likely(atomic_cmpxchg(&lock->count, 1, 0) == 1))
2869
		remaining_jiffies = target_jiffies - tmp_jiffies;
-
 
2870
		while ((int)remaining_jiffies > 0) {
-
 
2871
			delay(remaining_jiffies);
-
 
2872
			remaining_jiffies = target_jiffies - jiffies;
2570
        return 1;
2873
		}
Line 2571... Line 2874...
2571
    return 0;
2874
	}
2572
}
2875
}
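Concretely, the pattern the comment above describes looks like this (sketch; the 300ms budget is illustrative):

	unsigned long panel_off = jiffies;	/* timestamp of event A */
	/* ... unrelated work happens here ... */
	wait_remaining_ms_from_jiffies(panel_off, 300);	/* just before event B */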
2573
 
2876