Subversion Repositories Kolibri OS: i915_drv.h (Rev 5060)

/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "i915_gem_gtt.h"
//#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
//#include <linux/backlight.h>
#include <linux/hashtable.h>

#include <linux/intel-iommu.h>
#include <linux/kref.h>


/* General customization:
 */

#define I915_TILING_NONE          0

#define VGA_RSRC_NONE          0x00
#define VGA_RSRC_LEGACY_IO     0x01
#define VGA_RSRC_LEGACY_MEM    0x02
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO     0x04
#define VGA_RSRC_NORMAL_MEM    0x08

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20140725"

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
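
/* Worked example (illustrative): on a platform with two sprites per pipe,
 * sprite_name(PIPE_B, 0) evaluates to 1 * 2 + 0 + 'A' == 'C'.
 */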

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_PORT_DDI_A_2_LANES,
	POWER_DOMAIN_PORT_DDI_A_4_LANES,
	POWER_DOMAIN_PORT_DDI_B_2_LANES,
	POWER_DOMAIN_PORT_DDI_B_4_LANES,
	POWER_DOMAIN_PORT_DDI_C_2_LANES,
	POWER_DOMAIN_PORT_DDI_C_4_LANES,
	POWER_DOMAIN_PORT_DDI_D_2_LANES,
	POWER_DOMAIN_PORT_DDI_D_4_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
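
/* Usage sketch (illustrative; both macros pick up a "dev" already in scope):
 *
 *	int pipe, sprite;
 *	for_each_pipe(pipe)
 *		for_each_sprite(pipe, sprite)
 *			DRM_DEBUG_KMS("sprite %c\n", sprite_name(pipe, sprite));
 */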

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))
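
/* Usage sketch (illustrative): walk every domain in a power well's mask.
 *
 *	enum intel_display_power_domain domain;
 *	for_each_power_domain(domain, power_well->domains)
 *		count[domain]++;	// hypothetical per-domain bookkeeping
 */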

struct drm_i915_private;
struct i915_mmu_object;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A = 0,
	DPLL_ID_PCH_PLL_B = 1,
	DPLL_ID_WRPLL1 = 0,
	DPLL_ID_WRPLL2 = 1,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
	uint32_t wrpll;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	/* The mode_set hook is optional and should be used together with the
	 * intel_prepare_shared_dpll function. */
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t	tu;
	uint32_t	gmch_m;
	uint32_t	gmch_n;
	uint32_t	link_m;
	uint32_t	link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
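
/* Sketch of the computed ratios (per the implementation in intel_display.c):
 * the data M/N pair encodes (bpp * pixel_clock) : (nlanes * 8 * link_clock),
 * i.e. payload bandwidth over total link bandwidth, and the link M/N pair
 * encodes pixel_clock : link_clock; tu is the transfer unit size.
 */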

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0
#define WATCH_GTT	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];

		/* Register state */
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];

		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
};

struct intel_connector;
struct intel_crtc_config;
struct intel_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, uint32_t sprite_height,
				 int pixel_size, bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	void (*get_plane_config)(struct intel_crtc *,
				 struct intel_plane_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct intel_engine_cs *ring,
			  uint32_t flags);
	void (*update_primary_plane)(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       int fw_engine);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       int fw_engine);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;

	unsigned fw_rendercount;
	unsigned fw_mediacount;

	struct timer_list force_wake_timer;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
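
/* Expansion sketch: DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON) pastes
 * one bitfield per flag into struct intel_device_info below, i.e.
 *
 *	u8 is_mobile:1; u8 is_i85x:1; ... u8 has_fpga_dbg:1
 */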

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
/**
 * struct intel_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @vm: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *                initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct intel_context {
	struct kref ref;
	int user_handle;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_address_space *vm;

	struct {
		struct drm_i915_gem_object *rcs_state;
		bool initialized;
	} legacy_hw_ctx;

	struct list_head link;
};

struct i915_fbc {
	unsigned long size;
	unsigned threshold;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

struct i915_drrs {
	struct intel_connector *connector;
};

struct intel_dp;
struct i915_psr {
	struct mutex lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_HIST_CTL_B;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveFBC_CONTROL;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u32 cz_freq;

	u32 ei_interrupt_count;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT.  It needs to be
	 * replaced with a semaphore.  It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests?  Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/* the indicator for dispatch video commands on two BSD rings */
	int bsd_ring_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
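/* Arithmetic note: DIV_ROUND_UP(8 * 1500, 1000) == 12, i.e. a roughly
 * 12 second window given the 1500 ms hangcheck period above.
 */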

	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;


	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit set)
	 * mean that a reset is in progress and even values mean that the
	 * (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)
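
/* Decoding sketch (illustrative):
 *
 *	u32 ctr = atomic_read(&error->reset_counter);
 *	bool in_progress = ctr & I915_RESET_IN_PROGRESS_FLAG;	// odd value
 *	bool wedged = ctr & I915_WEDGED;	// terminally wedged, no recovery
 *	u32 completed = (ctr & ~I915_WEDGED) >> 1;	// finished resets
 */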

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* Userspace knobs for gpu hang simulation;
	 * combines both a ring mask, and extra flags
	 */
	u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
#define I915_STOP_RING_ALLOW_WARN      (1 << 30)

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int has_mipi:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
	} backlight;

	/* MIPI DSI */
	struct {
		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_init_runtime_pm), but
 * it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	bool suspended;
	bool _irqs_disabled;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct i915_frontbuffer_tracking {
	struct mutex lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct drm_i915_private {
	struct drm_device *dev;

	const struct intel_device_info info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];


	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs ring[I915_NUM_RINGS];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* protects the mmio flip data */
	spinlock_t mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
//	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct work_struct hotplug_work;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct delayed_work hotplug_reenable_work;

	struct i915_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	spinlock_t backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int vlv_cdclk_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
#if defined(CONFIG_MMU_NOTIFIER)
	DECLARE_HASHTABLE(mmu_notifiers, 7);
#endif

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
	/* list of fbdev registered on this device */
	struct intel_fbdev *fbdev;
#endif

	/*
	 * The console may be contended at resume, but we don't
	 * want it to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];

		/* current hardware state */
		struct ilk_wm_values hw;
	} wm;

	struct i915_runtime_pm pm;

	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
	u32 long_hpd_port_mask;
	u32 short_hpd_port_mask;
	struct work_struct dig_port_work;

	/*
	 * if we get a HPD irq from DP and a HPD irq from non-DP
	 * the non-DP HPD could block the workqueue on a mode config
	 * mutex that userspace may have taken. However
	 * userspace is waiting on the DP workqueue to run which is
	 * blocked behind the non-DP one.
	 */
	struct workqueue_struct *dp_wq;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
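
/* Usage sketch (illustrative):
 *
 *	struct intel_engine_cs *ring;
 *	int i;
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG("ring %s initialised\n", ring->name);
 */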

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_BITS \
	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
	(1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	(1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
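
/* Example: with 4 bits per pipe, pipe B (pipe == 1) owns bits 4-7, so
 * INTEL_FRONTBUFFER_PRIMARY(PIPE_B) == 1 << 4 and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) == 0xf0.
 */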
1694
 
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list;
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT.
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:3;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	struct intel_engine_cs *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	unsigned long user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	drm_dma_handle_t *phys_handle;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;
			unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15

			struct mm_struct *mm;
			struct i915_mmu_object *mn;
			struct work_struct *work;
		} userptr;
	};
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
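
/*
 * Editor's addition: to_intel_bo() is the usual way back from the embedded
 * struct drm_gem_object to the containing i915 object; a small hypothetical
 * example (not part of the original header):
 */
static inline bool i915_example_bo_is_tiled(struct drm_gem_object *gem)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);

	return obj->tiling_mode != I915_TILING_NONE;
}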
 
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_engine_cs *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Context related to this request */
	struct intel_context *ctx;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
		struct delayed_work idle_work;
	} mm;
	struct idr context_idr;

	atomic_t rps_wait_boost;
	struct intel_engine_cs *bsd_ring;
};

/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
	 *
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 a length mask if not set
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                which it falls
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 * CMD_DESC_MASTER: The command is allowed if the submitting process
	 *                  is the DRM master
	 */
	u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)
#define CMD_DESC_MASTER   (1<<5)

	/*
	 * The command's unique identification bits and the bitmask to get them.
	 * This isn't strictly the opcode field as defined in the spec and may
	 * also include type, subtype, and/or subop fields.
	 */
	struct {
		u32 value;
		u32 mask;
	} cmd;

	/*
	 * The command's length. The command is either fixed length (i.e. does
	 * not include a length field) or has a length field mask. The flag
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
	 * a length mask. All command entries in a command table must include
	 * length information.
	 */
	union {
		u32 fixed;
		u32 mask;
	} length;

	/*
	 * Describes where to find a register address in the command to check
	 * against the ring's register whitelist. Only valid if flags has the
	 * CMD_DESC_REGISTER bit set.
	 */
	struct {
		u32 offset;
		u32 mask;
	} reg;

#define MAX_CMD_DESC_BITMASKS 3
	/*
	 * Describes command checks where a particular dword is masked and
	 * compared against an expected value. If the command does not match
	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
	 *
	 * If the check specifies a non-zero condition_mask then the parser
	 * only performs the check when the bits specified by condition_mask
	 * are non-zero.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
		u32 condition_offset;
		u32 condition_mask;
	} bits[MAX_CMD_DESC_BITMASKS];
};

/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each ring has an array of tables. Each table consists of an array of command
 * descriptors, which must be sorted with command opcodes in ascending order.
 */
struct drm_i915_cmd_table {
	const struct drm_i915_cmd_descriptor *table;
	int count;
};
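
/*
 * Editor's addition: a hypothetical descriptor and table (all values invented
 * purely for illustration) showing how the fields above combine for a
 * fixed-length command whose register operand must pass the whitelist:
 *
 *	static const struct drm_i915_cmd_descriptor example_desc = {
 *		.flags = CMD_DESC_FIXED | CMD_DESC_REGISTER,
 *		.cmd = { .value = 0x12000000, .mask = 0xff000000 },
 *		.length = { .fixed = 2 },
 *		.reg = { .offset = 1, .mask = 0x007ffffc },
 *	};
 *
 *	static const struct drm_i915_cmd_table example_table = {
 *		.table = &example_desc,
 *		.count = 1,
 *	};
 */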
 
#define INTEL_INFO(dev)	(&to_i915(dev)->info)

#define IS_I830(dev)		((dev)->pdev->device == 0x3577)
#define IS_845G(dev)		((dev)->pdev->device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pdev->device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pdev->device == 0x2592)
#define IS_I945G(dev)		((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pdev->device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev)	((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		((dev)->pdev->device == 0x0156 || \
				 (dev)->pdev->device == 0x0152 || \
				 (dev)->pdev->device == 0x015a)
#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
				 (dev)->pdev->device == 0x0106 || \
				 (dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
				 (((dev)->pdev->device & 0xf) == 0x2  || \
				 ((dev)->pdev->device & 0xf) == 0x6 || \
				 ((dev)->pdev->device & 0xf) == 0xe))
#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev)		((dev)->pdev->device == 0x0A0E || \
				 (dev)->pdev->device == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)

#define RENDER_RING		(1<<RCS)
#define BSD_RING		(1<<VCS)
#define BLT_RING		(1<<BCS)
#define VEBOX_RING		(1<<VECS)
#define BSD2_RING		(1<<VCS2)
#define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
				 to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev)		intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev)	intel_enable_ppgtt(dev, true)

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
/*
 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
 * even when in MSI mode. This results in spurious interrupt warnings if the
 * legacy irq no. is shared with another device. The kernel then disables that
 * interrupt source and so prevents the other device from working properly.
 */
#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))

#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00

#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"


extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

/* i915_params.c */
struct i915_params {
	int modeset;
	int panel_ignore_lid;
	unsigned int powersave;
	int semaphores;
	unsigned int lvds_downclock;
	int lvds_channel_mode;
	int panel_use_ssc;
	int vbt_sdvo_panel_type;
	int enable_rc6;
	int enable_fbc;
	int enable_ppgtt;
	int enable_psr;
	unsigned int preliminary_hw_support;
	int disable_power_well;
	int enable_ips;
	int invert_brightness;
	int enable_cmd_parser;
	/* leave bools at the end to not create holes */
	bool enable_hangcheck;
	bool fastboot;
	bool prefault_disable;
	bool reset;
	bool disable_display;
	bool disable_vtd_wa;
	int use_mmio_flip;
	bool mmio_debug;
};
extern struct i915_params i915 __read_mostly;

				/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...);

void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
							int new_delay);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);

extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
					bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask);

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask);

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);

/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_device *dev);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);

#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
				     uint64_t flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
		return sg_page_iter_page(&sg_iter);

	return NULL;
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}
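
/*
 * Editor's addition: a typical pin/unpin bracket around CPU access to the
 * backing pages, sketched from the declarations above (error handling
 * trimmed; not part of the original header):
 *
 *	if (i915_gem_object_get_pages(obj) == 0) {
 *		i915_gem_object_pin_pages(obj);
 *		page = i915_gem_object_get_page(obj, 0);
 *		... access the page while the pin count holds it ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */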
 
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_engine_cs *to);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_engine_cs *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
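
/*
 * Editor's note: the signed cast above makes the comparison safe across u32
 * wraparound. For example, i915_seqno_passed(1, 0xffffffff) is true, since
 * (int32_t)(1 - 0xffffffff) == 2 >= 0, so a seqno just past the wrap still
 * counts as "later" than one just before it.
 */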
 
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}

static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
}

static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
}

void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
		       u32 *seqno);
#define i915_add_request(ring, seqno) \
	__i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
				int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			    int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			return true;
	return false;
}

/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      unsigned flags)
{
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
}
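
/*
 * Editor's addition: an illustrative call sequence (not from the original
 * header) pinning an object into the global GTT, 4KiB-aligned and
 * CPU-mappable, then releasing it:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE);
 *	if (ret == 0) {
 *		offset = i915_gem_obj_ggtt_offset(obj);
 *		... program hardware with the GGTT offset ...
 *		i915_gem_object_ggtt_unpin(obj);
 *	}
 */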
 
static inline int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}

void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);

/* i915_gem_context.c */
#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct intel_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct intel_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}

static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);

/* i915_gem_render_state.c */
int i915_gem_render_state_init(struct intel_engine_cs *ring);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  unsigned long start,
					  unsigned long end,
					  unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void intel_display_crc_init(struct drm_device *dev);
#else
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev, bool wedge,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    u32 batch_start_offset,
		    bool is_master);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
struct intel_encoder;
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);

void intel_notify_mmio_flip(struct intel_engine_cs *ring);

/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
#endif

/* On SNB platform, before reading ring registers forcewake bit
 * must be set to prevent GT core from power down and stale values being
 * returned.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define FORCEWAKE_RENDER	(1 << 0)
#define FORCEWAKE_MEDIA		(1 << 1)
#define FORCEWAKE_ALL		(FORCEWAKE_RENDER | FORCEWAKE_MEDIA)


#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. You have been warned.
 */
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
		u32 upper = I915_READ(upper_reg);			\
		u32 lower = I915_READ(lower_reg);			\
		u32 tmp = I915_READ(upper_reg);				\
		if (upper != tmp) {					\
			upper = tmp;					\
			lower = I915_READ(lower_reg);			\
			WARN_ON(I915_READ(upper_reg) != upper);		\
		}							\
		(u64)upper << 32 | lower; })
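
/*
 * Editor's note: the second read of upper_reg in the macro above detects a
 * carry between the two 32-bit halves (e.g. a counter ticking past
 * 0xffffffff between the reads), in which case the low half is re-read.
 * Hypothetical usage, with register names invented for illustration:
 *
 *	u64 ts = I915_READ64_2x32(TIMESTAMP_LO, TIMESTAMP_HI);
 */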
 
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else if (INTEL_INFO(dev)->gen >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while ((int)remaining_jiffies > 0) {
			delay(remaining_jiffies);
			remaining_jiffies = target_jiffies - jiffies;
		}
	}
}
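
/*
 * Editor's addition: a usage sketch for the helper above (variable names
 * hypothetical). Record jiffies at event A, then enforce the spacing just
 * before event B:
 *
 *	panel_off_jiffies = jiffies;		// event A
 *	...
 *	// event B must come at least 500 ms after event A
 *	wait_remaining_ms_from_jiffies(panel_off_jiffies, 500);
 */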
 
typedef struct
{
  int width;
  int height;
  int bpp;
  int freq;
} videomode_t;

struct cmdtable
{
    char *key;
    int   size;
    int  *val;
};

#define CMDENTRY(key, val) {(key), (sizeof(key)-1), &val}
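
/*
 * Editor's addition: a hypothetical option table for parse_cmdline() (names
 * and option strings invented for illustration). CMDENTRY() stores the key,
 * its length without the terminating NUL, and a pointer to the target int:
 *
 *	static int log_level;
 *	static struct cmdtable options[] = {
 *		CMDENTRY("-l", log_level),
 *	};
 *
 *	parse_cmdline(cmdline, options, log_path, &usermode);
 */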
 
void parse_cmdline(char *cmdline, struct cmdtable *table, char *log, videomode_t *mode);
struct drm_i915_gem_object
*kos_gem_fb_object_create(struct drm_device *dev, u32 gtt_offset, u32 size);

extern struct drm_i915_gem_object *main_fb_obj;

static inline struct drm_i915_gem_object *get_fb_obj(void)
{
    return main_fb_obj;
}

#define ioread32(addr)          readl(addr)


#endif