Rev 2336 | Rev 2340 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2336 | Rev 2338 | ||
---|---|---|---|
1 | /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- |
1 | /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- |
2 | */ |
2 | */ |
3 | /* |
3 | /* |
4 | * |
4 | * |
5 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
5 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
6 | * All Rights Reserved. |
6 | * All Rights Reserved. |
7 | * |
7 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a |
8 | * Permission is hereby granted, free of charge, to any person obtaining a |
9 | * copy of this software and associated documentation files (the |
9 | * copy of this software and associated documentation files (the |
10 | * "Software"), to deal in the Software without restriction, including |
10 | * "Software"), to deal in the Software without restriction, including |
11 | * without limitation the rights to use, copy, modify, merge, publish, |
11 | * without limitation the rights to use, copy, modify, merge, publish, |
12 | * distribute, sub license, and/or sell copies of the Software, and to |
12 | * distribute, sub license, and/or sell copies of the Software, and to |
13 | * permit persons to whom the Software is furnished to do so, subject to |
13 | * permit persons to whom the Software is furnished to do so, subject to |
14 | * the following conditions: |
14 | * the following conditions: |
15 | * |
15 | * |
16 | * The above copyright notice and this permission notice (including the |
16 | * The above copyright notice and this permission notice (including the |
17 | * next paragraph) shall be included in all copies or substantial portions |
17 | * next paragraph) shall be included in all copies or substantial portions |
18 | * of the Software. |
18 | * of the Software. |
19 | * |
19 | * |
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
21 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
21 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
22 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
22 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
23 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
23 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
24 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
24 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
25 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
25 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
26 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
26 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
27 | * |
27 | * |
28 | */ |
28 | */ |
29 | 29 | ||
30 | #ifndef _I915_DRV_H_ |
30 | #ifndef _I915_DRV_H_ |
31 | #define _I915_DRV_H_ |
31 | #define _I915_DRV_H_ |
32 | 32 | ||
33 | #include "i915_reg.h" |
33 | #include "i915_reg.h" |
34 | #include "intel_bios.h" |
34 | #include "intel_bios.h" |
35 | #include "intel_ringbuffer.h" |
35 | #include "intel_ringbuffer.h" |
36 | //#include |
36 | //#include |
37 | #include |
37 | #include |
38 | #include |
38 | #include |
39 | //#include |
39 | //#include |
40 | 40 | ||
41 | #include |
41 | #include |
42 | 42 | ||
43 | /* General customization: |
43 | /* General customization: |
44 | */ |
44 | */ |
45 | 45 | ||
46 | #define I915_TILING_NONE 0 |
46 | #define I915_TILING_NONE 0 |
47 | 47 | ||
48 | 48 | ||
49 | #define DRIVER_AUTHOR "Tungsten Graphics, Inc." |
49 | #define DRIVER_AUTHOR "Tungsten Graphics, Inc." |
50 | 50 | ||
51 | #define DRIVER_NAME "i915" |
51 | #define DRIVER_NAME "i915" |
52 | #define DRIVER_DESC "Intel Graphics" |
52 | #define DRIVER_DESC "Intel Graphics" |
53 | #define DRIVER_DATE "20080730" |
53 | #define DRIVER_DATE "20080730" |
54 | 54 | ||
55 | enum pipe { |
55 | enum pipe { |
56 | PIPE_A = 0, |
56 | PIPE_A = 0, |
57 | PIPE_B, |
57 | PIPE_B, |
58 | PIPE_C, |
58 | PIPE_C, |
59 | I915_MAX_PIPES |
59 | I915_MAX_PIPES |
60 | }; |
60 | }; |
61 | #define pipe_name(p) ((p) + 'A') |
61 | #define pipe_name(p) ((p) + 'A') |
62 | 62 | ||
63 | enum plane { |
63 | enum plane { |
64 | PLANE_A = 0, |
64 | PLANE_A = 0, |
65 | PLANE_B, |
65 | PLANE_B, |
66 | PLANE_C, |
66 | PLANE_C, |
67 | }; |
67 | }; |
68 | #define plane_name(p) ((p) + 'A') |
68 | #define plane_name(p) ((p) + 'A') |
69 | 69 | ||
70 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) |
70 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) |
71 | 71 | ||
72 | #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) |
72 | #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) |
73 | 73 | ||
74 | /* Interface history: |
74 | /* Interface history: |
75 | * |
75 | * |
76 | * 1.1: Original. |
76 | * 1.1: Original. |
77 | * 1.2: Add Power Management |
77 | * 1.2: Add Power Management |
78 | * 1.3: Add vblank support |
78 | * 1.3: Add vblank support |
79 | * 1.4: Fix cmdbuffer path, add heap destroy |
79 | * 1.4: Fix cmdbuffer path, add heap destroy |
80 | * 1.5: Add vblank pipe configuration |
80 | * 1.5: Add vblank pipe configuration |
81 | * 1.6: - New ioctl for scheduling buffer swaps on vertical blank |
81 | * 1.6: - New ioctl for scheduling buffer swaps on vertical blank |
82 | * - Support vertical blank on secondary display pipe |
82 | * - Support vertical blank on secondary display pipe |
83 | */ |
83 | */ |
84 | #define DRIVER_MAJOR 1 |
84 | #define DRIVER_MAJOR 1 |
85 | #define DRIVER_MINOR 6 |
85 | #define DRIVER_MINOR 6 |
86 | #define DRIVER_PATCHLEVEL 0 |
86 | #define DRIVER_PATCHLEVEL 0 |
87 | 87 | ||
88 | #define WATCH_COHERENCY 0 |
88 | #define WATCH_COHERENCY 0 |
89 | #define WATCH_LISTS 0 |
89 | #define WATCH_LISTS 0 |
90 | 90 | ||
91 | #define I915_GEM_PHYS_CURSOR_0 1 |
91 | #define I915_GEM_PHYS_CURSOR_0 1 |
92 | #define I915_GEM_PHYS_CURSOR_1 2 |
92 | #define I915_GEM_PHYS_CURSOR_1 2 |
93 | #define I915_GEM_PHYS_OVERLAY_REGS 3 |
93 | #define I915_GEM_PHYS_OVERLAY_REGS 3 |
94 | #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) |
94 | #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) |
95 | 95 | ||
96 | struct mem_block { |
96 | struct mem_block { |
97 | struct mem_block *next; |
97 | struct mem_block *next; |
98 | struct mem_block *prev; |
98 | struct mem_block *prev; |
99 | int start; |
99 | int start; |
100 | int size; |
100 | int size; |
101 | struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ |
101 | struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ |
102 | }; |
102 | }; |
103 | 103 | ||
104 | struct opregion_header; |
104 | struct opregion_header; |
105 | struct opregion_acpi; |
105 | struct opregion_acpi; |
106 | struct opregion_swsci; |
106 | struct opregion_swsci; |
107 | struct opregion_asle; |
107 | struct opregion_asle; |
108 | 108 | ||
109 | struct intel_opregion { |
109 | struct intel_opregion { |
110 | struct opregion_header *header; |
110 | struct opregion_header *header; |
111 | struct opregion_acpi *acpi; |
111 | struct opregion_acpi *acpi; |
112 | struct opregion_swsci *swsci; |
112 | struct opregion_swsci *swsci; |
113 | struct opregion_asle *asle; |
113 | struct opregion_asle *asle; |
114 | void *vbt; |
114 | void *vbt; |
115 | u32 __iomem *lid_state; |
115 | u32 __iomem *lid_state; |
116 | }; |
116 | }; |
117 | #define OPREGION_SIZE (8*1024) |
117 | #define OPREGION_SIZE (8*1024) |
118 | 118 | ||
119 | struct intel_overlay; |
119 | struct intel_overlay; |
120 | struct intel_overlay_error_state; |
120 | struct intel_overlay_error_state; |
121 | 121 | ||
122 | struct drm_i915_master_private { |
122 | struct drm_i915_master_private { |
123 | drm_local_map_t *sarea; |
123 | drm_local_map_t *sarea; |
124 | struct _drm_i915_sarea *sarea_priv; |
124 | struct _drm_i915_sarea *sarea_priv; |
125 | }; |
125 | }; |
126 | #define I915_FENCE_REG_NONE -1 |
126 | #define I915_FENCE_REG_NONE -1 |
127 | 127 | ||
128 | struct drm_i915_fence_reg { |
128 | struct drm_i915_fence_reg { |
129 | struct list_head lru_list; |
129 | struct list_head lru_list; |
130 | struct drm_i915_gem_object *obj; |
130 | struct drm_i915_gem_object *obj; |
131 | uint32_t setup_seqno; |
131 | uint32_t setup_seqno; |
132 | }; |
132 | }; |
133 | 133 | ||
134 | struct sdvo_device_mapping { |
134 | struct sdvo_device_mapping { |
135 | u8 initialized; |
135 | u8 initialized; |
136 | u8 dvo_port; |
136 | u8 dvo_port; |
137 | u8 slave_addr; |
137 | u8 slave_addr; |
138 | u8 dvo_wiring; |
138 | u8 dvo_wiring; |
139 | u8 i2c_pin; |
139 | u8 i2c_pin; |
140 | u8 i2c_speed; |
140 | u8 i2c_speed; |
141 | u8 ddc_pin; |
141 | u8 ddc_pin; |
142 | }; |
142 | }; |
143 | 143 | ||
144 | struct intel_display_error_state; |
144 | struct intel_display_error_state; |
145 | 145 | ||
146 | struct drm_i915_error_state { |
146 | struct drm_i915_error_state { |
147 | u32 eir; |
147 | u32 eir; |
148 | u32 pgtbl_er; |
148 | u32 pgtbl_er; |
149 | u32 pipestat[I915_MAX_PIPES]; |
149 | u32 pipestat[I915_MAX_PIPES]; |
150 | u32 ipeir; |
150 | u32 ipeir; |
151 | u32 ipehr; |
151 | u32 ipehr; |
152 | u32 instdone; |
152 | u32 instdone; |
153 | u32 acthd; |
153 | u32 acthd; |
154 | u32 error; /* gen6+ */ |
154 | u32 error; /* gen6+ */ |
155 | u32 bcs_acthd; /* gen6+ blt engine */ |
155 | u32 bcs_acthd; /* gen6+ blt engine */ |
156 | u32 bcs_ipehr; |
156 | u32 bcs_ipehr; |
157 | u32 bcs_ipeir; |
157 | u32 bcs_ipeir; |
158 | u32 bcs_instdone; |
158 | u32 bcs_instdone; |
159 | u32 bcs_seqno; |
159 | u32 bcs_seqno; |
160 | u32 vcs_acthd; /* gen6+ bsd engine */ |
160 | u32 vcs_acthd; /* gen6+ bsd engine */ |
161 | u32 vcs_ipehr; |
161 | u32 vcs_ipehr; |
162 | u32 vcs_ipeir; |
162 | u32 vcs_ipeir; |
163 | u32 vcs_instdone; |
163 | u32 vcs_instdone; |
164 | u32 vcs_seqno; |
164 | u32 vcs_seqno; |
165 | u32 instpm; |
165 | u32 instpm; |
166 | u32 instps; |
166 | u32 instps; |
167 | u32 instdone1; |
167 | u32 instdone1; |
168 | u32 seqno; |
168 | u32 seqno; |
169 | u64 bbaddr; |
169 | u64 bbaddr; |
170 | u64 fence[16]; |
170 | u64 fence[16]; |
171 | struct timeval time; |
171 | struct timeval time; |
172 | struct drm_i915_error_object { |
172 | struct drm_i915_error_object { |
173 | int page_count; |
173 | int page_count; |
174 | u32 gtt_offset; |
174 | u32 gtt_offset; |
175 | u32 *pages[0]; |
175 | u32 *pages[0]; |
176 | } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS]; |
176 | } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS]; |
177 | struct drm_i915_error_buffer { |
177 | struct drm_i915_error_buffer { |
178 | u32 size; |
178 | u32 size; |
179 | u32 name; |
179 | u32 name; |
180 | u32 seqno; |
180 | u32 seqno; |
181 | u32 gtt_offset; |
181 | u32 gtt_offset; |
182 | u32 read_domains; |
182 | u32 read_domains; |
183 | u32 write_domain; |
183 | u32 write_domain; |
184 | s32 fence_reg:5; |
184 | s32 fence_reg:5; |
185 | s32 pinned:2; |
185 | s32 pinned:2; |
186 | u32 tiling:2; |
186 | u32 tiling:2; |
187 | u32 dirty:1; |
187 | u32 dirty:1; |
188 | u32 purgeable:1; |
188 | u32 purgeable:1; |
189 | u32 ring:4; |
189 | u32 ring:4; |
190 | u32 cache_level:2; |
190 | u32 cache_level:2; |
191 | } *active_bo, *pinned_bo; |
191 | } *active_bo, *pinned_bo; |
192 | u32 active_bo_count, pinned_bo_count; |
192 | u32 active_bo_count, pinned_bo_count; |
193 | struct intel_overlay_error_state *overlay; |
193 | struct intel_overlay_error_state *overlay; |
194 | struct intel_display_error_state *display; |
194 | struct intel_display_error_state *display; |
195 | }; |
195 | }; |
196 | 196 | ||
197 | struct drm_i915_display_funcs { |
197 | struct drm_i915_display_funcs { |
198 | void (*dpms)(struct drm_crtc *crtc, int mode); |
198 | void (*dpms)(struct drm_crtc *crtc, int mode); |
199 | bool (*fbc_enabled)(struct drm_device *dev); |
199 | bool (*fbc_enabled)(struct drm_device *dev); |
200 | void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); |
200 | void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); |
201 | void (*disable_fbc)(struct drm_device *dev); |
201 | void (*disable_fbc)(struct drm_device *dev); |
202 | int (*get_display_clock_speed)(struct drm_device *dev); |
202 | int (*get_display_clock_speed)(struct drm_device *dev); |
203 | int (*get_fifo_size)(struct drm_device *dev, int plane); |
203 | int (*get_fifo_size)(struct drm_device *dev, int plane); |
204 | void (*update_wm)(struct drm_device *dev); |
204 | void (*update_wm)(struct drm_device *dev); |
205 | int (*crtc_mode_set)(struct drm_crtc *crtc, |
205 | int (*crtc_mode_set)(struct drm_crtc *crtc, |
206 | struct drm_display_mode *mode, |
206 | struct drm_display_mode *mode, |
207 | struct drm_display_mode *adjusted_mode, |
207 | struct drm_display_mode *adjusted_mode, |
208 | int x, int y, |
208 | int x, int y, |
209 | struct drm_framebuffer *old_fb); |
209 | struct drm_framebuffer *old_fb); |
210 | void (*fdi_link_train)(struct drm_crtc *crtc); |
210 | void (*fdi_link_train)(struct drm_crtc *crtc); |
211 | void (*init_clock_gating)(struct drm_device *dev); |
211 | void (*init_clock_gating)(struct drm_device *dev); |
212 | void (*init_pch_clock_gating)(struct drm_device *dev); |
212 | void (*init_pch_clock_gating)(struct drm_device *dev); |
213 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, |
213 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, |
214 | struct drm_framebuffer *fb, |
214 | struct drm_framebuffer *fb, |
215 | struct drm_i915_gem_object *obj); |
215 | struct drm_i915_gem_object *obj); |
216 | int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
216 | int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
217 | int x, int y); |
217 | int x, int y); |
218 | /* clock updates for mode set */ |
218 | /* clock updates for mode set */ |
219 | /* cursor updates */ |
219 | /* cursor updates */ |
220 | /* render clock increase/decrease */ |
220 | /* render clock increase/decrease */ |
221 | /* display clock increase/decrease */ |
221 | /* display clock increase/decrease */ |
222 | /* pll clock increase/decrease */ |
222 | /* pll clock increase/decrease */ |
223 | }; |
223 | }; |
224 | 224 | ||
225 | struct intel_device_info { |
225 | struct intel_device_info { |
226 | u8 gen; |
226 | u8 gen; |
227 | u8 is_mobile : 1; |
227 | u8 is_mobile : 1; |
228 | u8 is_i85x : 1; |
228 | u8 is_i85x : 1; |
229 | u8 is_i915g : 1; |
229 | u8 is_i915g : 1; |
230 | u8 is_i945gm : 1; |
230 | u8 is_i945gm : 1; |
231 | u8 is_g33 : 1; |
231 | u8 is_g33 : 1; |
232 | u8 need_gfx_hws : 1; |
232 | u8 need_gfx_hws : 1; |
233 | u8 is_g4x : 1; |
233 | u8 is_g4x : 1; |
234 | u8 is_pineview : 1; |
234 | u8 is_pineview : 1; |
235 | u8 is_broadwater : 1; |
235 | u8 is_broadwater : 1; |
236 | u8 is_crestline : 1; |
236 | u8 is_crestline : 1; |
237 | u8 is_ivybridge : 1; |
237 | u8 is_ivybridge : 1; |
238 | u8 has_fbc : 1; |
238 | u8 has_fbc : 1; |
239 | u8 has_pipe_cxsr : 1; |
239 | u8 has_pipe_cxsr : 1; |
240 | u8 has_hotplug : 1; |
240 | u8 has_hotplug : 1; |
241 | u8 cursor_needs_physical : 1; |
241 | u8 cursor_needs_physical : 1; |
242 | u8 has_overlay : 1; |
242 | u8 has_overlay : 1; |
243 | u8 overlay_needs_physical : 1; |
243 | u8 overlay_needs_physical : 1; |
244 | u8 supports_tv : 1; |
244 | u8 supports_tv : 1; |
245 | u8 has_bsd_ring : 1; |
245 | u8 has_bsd_ring : 1; |
246 | u8 has_blt_ring : 1; |
246 | u8 has_blt_ring : 1; |
247 | }; |
247 | }; |
248 | 248 | ||
249 | enum no_fbc_reason { |
249 | enum no_fbc_reason { |
250 | FBC_NO_OUTPUT, /* no outputs enabled to compress */ |
250 | FBC_NO_OUTPUT, /* no outputs enabled to compress */ |
251 | FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ |
251 | FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ |
252 | FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ |
252 | FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ |
253 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ |
253 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ |
254 | FBC_BAD_PLANE, /* fbc not supported on plane */ |
254 | FBC_BAD_PLANE, /* fbc not supported on plane */ |
255 | FBC_NOT_TILED, /* buffer not tiled */ |
255 | FBC_NOT_TILED, /* buffer not tiled */ |
256 | FBC_MULTIPLE_PIPES, /* more than one pipe active */ |
256 | FBC_MULTIPLE_PIPES, /* more than one pipe active */ |
257 | FBC_MODULE_PARAM, |
257 | FBC_MODULE_PARAM, |
258 | }; |
258 | }; |
259 | 259 | ||
260 | enum intel_pch { |
260 | enum intel_pch { |
261 | PCH_IBX, /* Ibexpeak PCH */ |
261 | PCH_IBX, /* Ibexpeak PCH */ |
262 | PCH_CPT, /* Cougarpoint PCH */ |
262 | PCH_CPT, /* Cougarpoint PCH */ |
263 | }; |
263 | }; |
264 | 264 | ||
265 | #define QUIRK_PIPEA_FORCE (1<<0) |
265 | #define QUIRK_PIPEA_FORCE (1<<0) |
266 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) |
266 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) |
267 | 267 | ||
268 | struct intel_fbdev; |
268 | struct intel_fbdev; |
269 | struct intel_fbc_work; |
269 | struct intel_fbc_work; |
270 | 270 | ||
271 | typedef struct drm_i915_private { |
271 | typedef struct drm_i915_private { |
272 | struct drm_device *dev; |
272 | struct drm_device *dev; |
273 | 273 | ||
274 | const struct intel_device_info *info; |
274 | const struct intel_device_info *info; |
275 | 275 | ||
276 | int has_gem; |
276 | int has_gem; |
277 | int relative_constants_mode; |
277 | int relative_constants_mode; |
278 | 278 | ||
279 | void __iomem *regs; |
279 | void __iomem *regs; |
280 | u32 gt_fifo_count; |
280 | u32 gt_fifo_count; |
281 | 281 | ||
282 | struct intel_gmbus { |
282 | struct intel_gmbus { |
283 | struct i2c_adapter adapter; |
283 | struct i2c_adapter adapter; |
284 | struct i2c_adapter *force_bit; |
284 | struct i2c_adapter *force_bit; |
285 | u32 reg0; |
285 | u32 reg0; |
286 | } *gmbus; |
286 | } *gmbus; |
287 | 287 | ||
288 | struct pci_dev *bridge_dev; |
288 | struct pci_dev *bridge_dev; |
289 | struct intel_ring_buffer ring[I915_NUM_RINGS]; |
289 | struct intel_ring_buffer ring[I915_NUM_RINGS]; |
290 | uint32_t next_seqno; |
290 | uint32_t next_seqno; |
291 | 291 | ||
292 | drm_dma_handle_t *status_page_dmah; |
292 | drm_dma_handle_t *status_page_dmah; |
293 | // uint32_t counter; |
293 | // uint32_t counter; |
294 | // drm_local_map_t hws_map; |
294 | // drm_local_map_t hws_map; |
295 | struct drm_i915_gem_object *pwrctx; |
295 | struct drm_i915_gem_object *pwrctx; |
296 | struct drm_i915_gem_object *renderctx; |
296 | struct drm_i915_gem_object *renderctx; |
297 | 297 | ||
298 | // struct resource mch_res; |
298 | // struct resource mch_res; |
299 | 299 | ||
300 | unsigned int cpp; |
300 | unsigned int cpp; |
301 | int back_offset; |
301 | int back_offset; |
302 | int front_offset; |
302 | int front_offset; |
303 | int current_page; |
303 | int current_page; |
304 | int page_flipping; |
304 | int page_flipping; |
305 | 305 | ||
306 | atomic_t irq_received; |
306 | atomic_t irq_received; |
307 | 307 | ||
308 | /* protects the irq masks */ |
308 | /* protects the irq masks */ |
309 | spinlock_t irq_lock; |
309 | spinlock_t irq_lock; |
310 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
310 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
311 | u32 pipestat[2]; |
311 | u32 pipestat[2]; |
312 | u32 irq_mask; |
312 | u32 irq_mask; |
313 | u32 gt_irq_mask; |
313 | u32 gt_irq_mask; |
314 | u32 pch_irq_mask; |
314 | u32 pch_irq_mask; |
315 | 315 | ||
316 | u32 hotplug_supported_mask; |
316 | u32 hotplug_supported_mask; |
317 | // struct work_struct hotplug_work; |
317 | // struct work_struct hotplug_work; |
318 | 318 | ||
319 | int tex_lru_log_granularity; |
319 | int tex_lru_log_granularity; |
320 | int allow_batchbuffer; |
320 | int allow_batchbuffer; |
321 | struct mem_block *agp_heap; |
321 | struct mem_block *agp_heap; |
322 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
322 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
323 | int vblank_pipe; |
323 | int vblank_pipe; |
324 | int num_pipe; |
324 | int num_pipe; |
325 | 325 | ||
326 | /* For hangcheck timer */ |
326 | /* For hangcheck timer */ |
327 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
327 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
328 | struct timer_list hangcheck_timer; |
328 | struct timer_list hangcheck_timer; |
329 | int hangcheck_count; |
329 | int hangcheck_count; |
330 | uint32_t last_acthd; |
330 | uint32_t last_acthd; |
331 | uint32_t last_instdone; |
331 | uint32_t last_instdone; |
332 | uint32_t last_instdone1; |
332 | uint32_t last_instdone1; |
333 | 333 | ||
334 | unsigned long cfb_size; |
334 | unsigned long cfb_size; |
335 | unsigned int cfb_fb; |
335 | unsigned int cfb_fb; |
336 | enum plane cfb_plane; |
336 | enum plane cfb_plane; |
337 | int cfb_y; |
337 | int cfb_y; |
338 | // struct intel_fbc_work *fbc_work; |
338 | // struct intel_fbc_work *fbc_work; |
339 | 339 | ||
340 | struct intel_opregion opregion; |
340 | struct intel_opregion opregion; |
341 | 341 | ||
342 | /* overlay */ |
342 | /* overlay */ |
343 | // struct intel_overlay *overlay; |
343 | // struct intel_overlay *overlay; |
344 | 344 | ||
345 | /* LVDS info */ |
345 | /* LVDS info */ |
346 | int backlight_level; /* restore backlight to this value */ |
346 | int backlight_level; /* restore backlight to this value */ |
347 | bool backlight_enabled; |
347 | bool backlight_enabled; |
348 | struct drm_display_mode *panel_fixed_mode; |
348 | struct drm_display_mode *panel_fixed_mode; |
349 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
349 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
350 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
350 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
351 | 351 | ||
352 | /* Feature bits from the VBIOS */ |
352 | /* Feature bits from the VBIOS */ |
353 | unsigned int int_tv_support:1; |
353 | unsigned int int_tv_support:1; |
354 | unsigned int lvds_dither:1; |
354 | unsigned int lvds_dither:1; |
355 | unsigned int lvds_vbt:1; |
355 | unsigned int lvds_vbt:1; |
356 | unsigned int int_crt_support:1; |
356 | unsigned int int_crt_support:1; |
357 | unsigned int lvds_use_ssc:1; |
357 | unsigned int lvds_use_ssc:1; |
358 | int lvds_ssc_freq; |
358 | int lvds_ssc_freq; |
359 | struct { |
359 | struct { |
360 | int rate; |
360 | int rate; |
361 | int lanes; |
361 | int lanes; |
362 | int preemphasis; |
362 | int preemphasis; |
363 | int vswing; |
363 | int vswing; |
364 | 364 | ||
365 | bool initialized; |
365 | bool initialized; |
366 | bool support; |
366 | bool support; |
367 | int bpp; |
367 | int bpp; |
368 | struct edp_power_seq pps; |
368 | struct edp_power_seq pps; |
369 | } edp; |
369 | } edp; |
370 | bool no_aux_handshake; |
370 | bool no_aux_handshake; |
371 | 371 | ||
372 | // struct notifier_block lid_notifier; |
372 | // struct notifier_block lid_notifier; |
373 | 373 | ||
374 | int crt_ddc_pin; |
374 | int crt_ddc_pin; |
375 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
375 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
376 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
376 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
377 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
377 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
378 | 378 | ||
379 | unsigned int fsb_freq, mem_freq, is_ddr3; |
379 | unsigned int fsb_freq, mem_freq, is_ddr3; |
380 | 380 | ||
381 | spinlock_t error_lock; |
381 | spinlock_t error_lock; |
382 | // struct drm_i915_error_state *first_error; |
382 | // struct drm_i915_error_state *first_error; |
383 | // struct work_struct error_work; |
383 | // struct work_struct error_work; |
384 | // struct completion error_completion; |
384 | // struct completion error_completion; |
385 | // struct workqueue_struct *wq; |
385 | // struct workqueue_struct *wq; |
386 | 386 | ||
387 | /* Display functions */ |
387 | /* Display functions */ |
388 | struct drm_i915_display_funcs display; |
388 | struct drm_i915_display_funcs display; |
389 | 389 | ||
390 | /* PCH chipset type */ |
390 | /* PCH chipset type */ |
391 | enum intel_pch pch_type; |
391 | enum intel_pch pch_type; |
392 | 392 | ||
393 | unsigned long quirks; |
393 | unsigned long quirks; |
394 | 394 | ||
395 | /* Register state */ |
395 | /* Register state */ |
396 | bool modeset_on_lid; |
396 | bool modeset_on_lid; |
397 | u8 saveLBB; |
397 | u8 saveLBB; |
398 | u32 saveDSPACNTR; |
398 | u32 saveDSPACNTR; |
399 | u32 saveDSPBCNTR; |
399 | u32 saveDSPBCNTR; |
400 | u32 saveDSPARB; |
400 | u32 saveDSPARB; |
401 | u32 saveHWS; |
401 | u32 saveHWS; |
402 | u32 savePIPEACONF; |
402 | u32 savePIPEACONF; |
403 | u32 savePIPEBCONF; |
403 | u32 savePIPEBCONF; |
404 | u32 savePIPEASRC; |
404 | u32 savePIPEASRC; |
405 | u32 savePIPEBSRC; |
405 | u32 savePIPEBSRC; |
406 | u32 saveFPA0; |
406 | u32 saveFPA0; |
407 | u32 saveFPA1; |
407 | u32 saveFPA1; |
408 | u32 saveDPLL_A; |
408 | u32 saveDPLL_A; |
409 | u32 saveDPLL_A_MD; |
409 | u32 saveDPLL_A_MD; |
410 | u32 saveHTOTAL_A; |
410 | u32 saveHTOTAL_A; |
411 | u32 saveHBLANK_A; |
411 | u32 saveHBLANK_A; |
412 | u32 saveHSYNC_A; |
412 | u32 saveHSYNC_A; |
413 | u32 saveVTOTAL_A; |
413 | u32 saveVTOTAL_A; |
414 | u32 saveVBLANK_A; |
414 | u32 saveVBLANK_A; |
415 | u32 saveVSYNC_A; |
415 | u32 saveVSYNC_A; |
416 | u32 saveBCLRPAT_A; |
416 | u32 saveBCLRPAT_A; |
417 | u32 saveTRANSACONF; |
417 | u32 saveTRANSACONF; |
418 | u32 saveTRANS_HTOTAL_A; |
418 | u32 saveTRANS_HTOTAL_A; |
419 | u32 saveTRANS_HBLANK_A; |
419 | u32 saveTRANS_HBLANK_A; |
420 | u32 saveTRANS_HSYNC_A; |
420 | u32 saveTRANS_HSYNC_A; |
421 | u32 saveTRANS_VTOTAL_A; |
421 | u32 saveTRANS_VTOTAL_A; |
422 | u32 saveTRANS_VBLANK_A; |
422 | u32 saveTRANS_VBLANK_A; |
423 | u32 saveTRANS_VSYNC_A; |
423 | u32 saveTRANS_VSYNC_A; |
424 | u32 savePIPEASTAT; |
424 | u32 savePIPEASTAT; |
425 | u32 saveDSPASTRIDE; |
425 | u32 saveDSPASTRIDE; |
426 | u32 saveDSPASIZE; |
426 | u32 saveDSPASIZE; |
427 | u32 saveDSPAPOS; |
427 | u32 saveDSPAPOS; |
428 | u32 saveDSPAADDR; |
428 | u32 saveDSPAADDR; |
429 | u32 saveDSPASURF; |
429 | u32 saveDSPASURF; |
430 | u32 saveDSPATILEOFF; |
430 | u32 saveDSPATILEOFF; |
431 | u32 savePFIT_PGM_RATIOS; |
431 | u32 savePFIT_PGM_RATIOS; |
432 | u32 saveBLC_HIST_CTL; |
432 | u32 saveBLC_HIST_CTL; |
433 | u32 saveBLC_PWM_CTL; |
433 | u32 saveBLC_PWM_CTL; |
434 | u32 saveBLC_PWM_CTL2; |
434 | u32 saveBLC_PWM_CTL2; |
435 | u32 saveBLC_CPU_PWM_CTL; |
435 | u32 saveBLC_CPU_PWM_CTL; |
436 | u32 saveBLC_CPU_PWM_CTL2; |
436 | u32 saveBLC_CPU_PWM_CTL2; |
437 | u32 saveFPB0; |
437 | u32 saveFPB0; |
438 | u32 saveFPB1; |
438 | u32 saveFPB1; |
439 | u32 saveDPLL_B; |
439 | u32 saveDPLL_B; |
440 | u32 saveDPLL_B_MD; |
440 | u32 saveDPLL_B_MD; |
441 | u32 saveHTOTAL_B; |
441 | u32 saveHTOTAL_B; |
442 | u32 saveHBLANK_B; |
442 | u32 saveHBLANK_B; |
443 | u32 saveHSYNC_B; |
443 | u32 saveHSYNC_B; |
444 | u32 saveVTOTAL_B; |
444 | u32 saveVTOTAL_B; |
445 | u32 saveVBLANK_B; |
445 | u32 saveVBLANK_B; |
446 | u32 saveVSYNC_B; |
446 | u32 saveVSYNC_B; |
447 | u32 saveBCLRPAT_B; |
447 | u32 saveBCLRPAT_B; |
448 | u32 saveTRANSBCONF; |
448 | u32 saveTRANSBCONF; |
449 | u32 saveTRANS_HTOTAL_B; |
449 | u32 saveTRANS_HTOTAL_B; |
450 | u32 saveTRANS_HBLANK_B; |
450 | u32 saveTRANS_HBLANK_B; |
451 | u32 saveTRANS_HSYNC_B; |
451 | u32 saveTRANS_HSYNC_B; |
452 | u32 saveTRANS_VTOTAL_B; |
452 | u32 saveTRANS_VTOTAL_B; |
453 | u32 saveTRANS_VBLANK_B; |
453 | u32 saveTRANS_VBLANK_B; |
454 | u32 saveTRANS_VSYNC_B; |
454 | u32 saveTRANS_VSYNC_B; |
455 | u32 savePIPEBSTAT; |
455 | u32 savePIPEBSTAT; |
456 | u32 saveDSPBSTRIDE; |
456 | u32 saveDSPBSTRIDE; |
457 | u32 saveDSPBSIZE; |
457 | u32 saveDSPBSIZE; |
458 | u32 saveDSPBPOS; |
458 | u32 saveDSPBPOS; |
459 | u32 saveDSPBADDR; |
459 | u32 saveDSPBADDR; |
460 | u32 saveDSPBSURF; |
460 | u32 saveDSPBSURF; |
461 | u32 saveDSPBTILEOFF; |
461 | u32 saveDSPBTILEOFF; |
462 | u32 saveVGA0; |
462 | u32 saveVGA0; |
463 | u32 saveVGA1; |
463 | u32 saveVGA1; |
464 | u32 saveVGA_PD; |
464 | u32 saveVGA_PD; |
465 | u32 saveVGACNTRL; |
465 | u32 saveVGACNTRL; |
466 | u32 saveADPA; |
466 | u32 saveADPA; |
467 | u32 saveLVDS; |
467 | u32 saveLVDS; |
468 | u32 savePP_ON_DELAYS; |
468 | u32 savePP_ON_DELAYS; |
469 | u32 savePP_OFF_DELAYS; |
469 | u32 savePP_OFF_DELAYS; |
470 | u32 saveDVOA; |
470 | u32 saveDVOA; |
471 | u32 saveDVOB; |
471 | u32 saveDVOB; |
472 | u32 saveDVOC; |
472 | u32 saveDVOC; |
473 | u32 savePP_ON; |
473 | u32 savePP_ON; |
474 | u32 savePP_OFF; |
474 | u32 savePP_OFF; |
475 | u32 savePP_CONTROL; |
475 | u32 savePP_CONTROL; |
476 | u32 savePP_DIVISOR; |
476 | u32 savePP_DIVISOR; |
477 | u32 savePFIT_CONTROL; |
477 | u32 savePFIT_CONTROL; |
478 | u32 save_palette_a[256]; |
478 | u32 save_palette_a[256]; |
479 | u32 save_palette_b[256]; |
479 | u32 save_palette_b[256]; |
480 | u32 saveDPFC_CB_BASE; |
480 | u32 saveDPFC_CB_BASE; |
481 | u32 saveFBC_CFB_BASE; |
481 | u32 saveFBC_CFB_BASE; |
482 | u32 saveFBC_LL_BASE; |
482 | u32 saveFBC_LL_BASE; |
483 | u32 saveFBC_CONTROL; |
483 | u32 saveFBC_CONTROL; |
484 | u32 saveFBC_CONTROL2; |
484 | u32 saveFBC_CONTROL2; |
485 | u32 saveIER; |
485 | u32 saveIER; |
486 | u32 saveIIR; |
486 | u32 saveIIR; |
487 | u32 saveIMR; |
487 | u32 saveIMR; |
488 | u32 saveDEIER; |
488 | u32 saveDEIER; |
489 | u32 saveDEIMR; |
489 | u32 saveDEIMR; |
490 | u32 saveGTIER; |
490 | u32 saveGTIER; |
491 | u32 saveGTIMR; |
491 | u32 saveGTIMR; |
492 | u32 saveFDI_RXA_IMR; |
492 | u32 saveFDI_RXA_IMR; |
493 | u32 saveFDI_RXB_IMR; |
493 | u32 saveFDI_RXB_IMR; |
494 | u32 saveCACHE_MODE_0; |
494 | u32 saveCACHE_MODE_0; |
495 | u32 saveMI_ARB_STATE; |
495 | u32 saveMI_ARB_STATE; |
496 | u32 saveSWF0[16]; |
496 | u32 saveSWF0[16]; |
497 | u32 saveSWF1[16]; |
497 | u32 saveSWF1[16]; |
498 | u32 saveSWF2[3]; |
498 | u32 saveSWF2[3]; |
499 | u8 saveMSR; |
499 | u8 saveMSR; |
500 | u8 saveSR[8]; |
500 | u8 saveSR[8]; |
501 | u8 saveGR[25]; |
501 | u8 saveGR[25]; |
502 | u8 saveAR_INDEX; |
502 | u8 saveAR_INDEX; |
503 | u8 saveAR[21]; |
503 | u8 saveAR[21]; |
504 | u8 saveDACMASK; |
504 | u8 saveDACMASK; |
505 | u8 saveCR[37]; |
505 | u8 saveCR[37]; |
506 | uint64_t saveFENCE[16]; |
506 | uint64_t saveFENCE[16]; |
507 | u32 saveCURACNTR; |
507 | u32 saveCURACNTR; |
508 | u32 saveCURAPOS; |
508 | u32 saveCURAPOS; |
509 | u32 saveCURABASE; |
509 | u32 saveCURABASE; |
510 | u32 saveCURBCNTR; |
510 | u32 saveCURBCNTR; |
511 | u32 saveCURBPOS; |
511 | u32 saveCURBPOS; |
512 | u32 saveCURBBASE; |
512 | u32 saveCURBBASE; |
513 | u32 saveCURSIZE; |
513 | u32 saveCURSIZE; |
514 | u32 saveDP_B; |
514 | u32 saveDP_B; |
515 | u32 saveDP_C; |
515 | u32 saveDP_C; |
516 | u32 saveDP_D; |
516 | u32 saveDP_D; |
517 | u32 savePIPEA_GMCH_DATA_M; |
517 | u32 savePIPEA_GMCH_DATA_M; |
518 | u32 savePIPEB_GMCH_DATA_M; |
518 | u32 savePIPEB_GMCH_DATA_M; |
519 | u32 savePIPEA_GMCH_DATA_N; |
519 | u32 savePIPEA_GMCH_DATA_N; |
520 | u32 savePIPEB_GMCH_DATA_N; |
520 | u32 savePIPEB_GMCH_DATA_N; |
521 | u32 savePIPEA_DP_LINK_M; |
521 | u32 savePIPEA_DP_LINK_M; |
522 | u32 savePIPEB_DP_LINK_M; |
522 | u32 savePIPEB_DP_LINK_M; |
523 | u32 savePIPEA_DP_LINK_N; |
523 | u32 savePIPEA_DP_LINK_N; |
524 | u32 savePIPEB_DP_LINK_N; |
524 | u32 savePIPEB_DP_LINK_N; |
525 | u32 saveFDI_RXA_CTL; |
525 | u32 saveFDI_RXA_CTL; |
526 | u32 saveFDI_TXA_CTL; |
526 | u32 saveFDI_TXA_CTL; |
527 | u32 saveFDI_RXB_CTL; |
527 | u32 saveFDI_RXB_CTL; |
528 | u32 saveFDI_TXB_CTL; |
528 | u32 saveFDI_TXB_CTL; |
529 | u32 savePFA_CTL_1; |
529 | u32 savePFA_CTL_1; |
530 | u32 savePFB_CTL_1; |
530 | u32 savePFB_CTL_1; |
531 | u32 savePFA_WIN_SZ; |
531 | u32 savePFA_WIN_SZ; |
532 | u32 savePFB_WIN_SZ; |
532 | u32 savePFB_WIN_SZ; |
533 | u32 savePFA_WIN_POS; |
533 | u32 savePFA_WIN_POS; |
534 | u32 savePFB_WIN_POS; |
534 | u32 savePFB_WIN_POS; |
535 | u32 savePCH_DREF_CONTROL; |
535 | u32 savePCH_DREF_CONTROL; |
536 | u32 saveDISP_ARB_CTL; |
536 | u32 saveDISP_ARB_CTL; |
537 | u32 savePIPEA_DATA_M1; |
537 | u32 savePIPEA_DATA_M1; |
538 | u32 savePIPEA_DATA_N1; |
538 | u32 savePIPEA_DATA_N1; |
539 | u32 savePIPEA_LINK_M1; |
539 | u32 savePIPEA_LINK_M1; |
540 | u32 savePIPEA_LINK_N1; |
540 | u32 savePIPEA_LINK_N1; |
541 | u32 savePIPEB_DATA_M1; |
541 | u32 savePIPEB_DATA_M1; |
542 | u32 savePIPEB_DATA_N1; |
542 | u32 savePIPEB_DATA_N1; |
543 | u32 savePIPEB_LINK_M1; |
543 | u32 savePIPEB_LINK_M1; |
544 | u32 savePIPEB_LINK_N1; |
544 | u32 savePIPEB_LINK_N1; |
545 | u32 saveMCHBAR_RENDER_STANDBY; |
545 | u32 saveMCHBAR_RENDER_STANDBY; |
546 | u32 savePCH_PORT_HOTPLUG; |
546 | u32 savePCH_PORT_HOTPLUG; |
547 | 547 | ||
548 | struct { |
548 | struct { |
549 | /** Bridge to intel-gtt-ko */ |
549 | /** Bridge to intel-gtt-ko */ |
550 | const struct intel_gtt *gtt; |
550 | const struct intel_gtt *gtt; |
551 | /** Memory allocator for GTT stolen memory */ |
551 | /** Memory allocator for GTT stolen memory */ |
552 | struct drm_mm stolen; |
552 | struct drm_mm stolen; |
553 | /** Memory allocator for GTT */ |
553 | /** Memory allocator for GTT */ |
554 | struct drm_mm gtt_space; |
554 | struct drm_mm gtt_space; |
555 | /** List of all objects in gtt_space. Used to restore gtt |
555 | /** List of all objects in gtt_space. Used to restore gtt |
556 | * mappings on resume */ |
556 | * mappings on resume */ |
557 | struct list_head gtt_list; |
557 | struct list_head gtt_list; |
558 | 558 | ||
559 | /** Usable portion of the GTT for GEM */ |
559 | /** Usable portion of the GTT for GEM */ |
560 | unsigned long gtt_start; |
560 | unsigned long gtt_start; |
561 | unsigned long gtt_mappable_end; |
561 | unsigned long gtt_mappable_end; |
562 | unsigned long gtt_end; |
562 | unsigned long gtt_end; |
563 | 563 | ||
564 | // struct io_mapping *gtt_mapping; |
564 | // struct io_mapping *gtt_mapping; |
565 | int gtt_mtrr; |
565 | int gtt_mtrr; |
566 | 566 | ||
567 | // struct shrinker inactive_shrinker; |
567 | // struct shrinker inactive_shrinker; |
568 | 568 | ||
569 | /** |
569 | /** |
570 | * List of objects currently involved in rendering. |
570 | * List of objects currently involved in rendering. |
571 | * |
571 | * |
572 | * Includes buffers having the contents of their GPU caches |
572 | * Includes buffers having the contents of their GPU caches |
573 | * flushed, not necessarily primitives. last_rendering_seqno |
573 | * flushed, not necessarily primitives. last_rendering_seqno |
574 | * represents when the rendering involved will be completed. |
574 | * represents when the rendering involved will be completed. |
575 | * |
575 | * |
576 | * A reference is held on the buffer while on this list. |
576 | * A reference is held on the buffer while on this list. |
577 | */ |
577 | */ |
578 | struct list_head active_list; |
578 | struct list_head active_list; |
579 | 579 | ||
580 | /** |
580 | /** |
581 | * List of objects which are not in the ringbuffer but which |
581 | * List of objects which are not in the ringbuffer but which |
582 | * still have a write_domain which needs to be flushed before |
582 | * still have a write_domain which needs to be flushed before |
583 | * unbinding. |
583 | * unbinding. |
584 | * |
584 | * |
585 | * last_rendering_seqno is 0 while an object is in this list. |
585 | * last_rendering_seqno is 0 while an object is in this list. |
586 | * |
586 | * |
587 | * A reference is held on the buffer while on this list. |
587 | * A reference is held on the buffer while on this list. |
588 | */ |
588 | */ |
589 | struct list_head flushing_list; |
589 | struct list_head flushing_list; |
590 | 590 | ||
591 | /** |
591 | /** |
592 | * LRU list of objects which are not in the ringbuffer and |
592 | * LRU list of objects which are not in the ringbuffer and |
593 | * are ready to unbind, but are still in the GTT. |
593 | * are ready to unbind, but are still in the GTT. |
594 | * |
594 | * |
595 | * last_rendering_seqno is 0 while an object is in this list. |
595 | * last_rendering_seqno is 0 while an object is in this list. |
596 | * |
596 | * |
597 | * A reference is not held on the buffer while on this list, |
597 | * A reference is not held on the buffer while on this list, |
598 | * as merely being GTT-bound shouldn't prevent its being |
598 | * as merely being GTT-bound shouldn't prevent its being |
599 | * freed, and we'll pull it off the list in the free path. |
599 | * freed, and we'll pull it off the list in the free path. |
600 | */ |
600 | */ |
601 | struct list_head inactive_list; |
601 | struct list_head inactive_list; |
602 | 602 | ||
603 | /** |
603 | /** |
604 | * LRU list of objects which are not in the ringbuffer but |
604 | * LRU list of objects which are not in the ringbuffer but |
605 | * are still pinned in the GTT. |
605 | * are still pinned in the GTT. |
606 | */ |
606 | */ |
607 | struct list_head pinned_list; |
607 | struct list_head pinned_list; |
608 | 608 | ||
609 | /** LRU list of objects with fence regs on them. */ |
609 | /** LRU list of objects with fence regs on them. */ |
610 | struct list_head fence_list; |
610 | struct list_head fence_list; |
611 | 611 | ||
612 | /** |
612 | /** |
613 | * List of objects currently pending being freed. |
613 | * List of objects currently pending being freed. |
614 | * |
614 | * |
615 | * These objects are no longer in use, but due to a signal |
615 | * These objects are no longer in use, but due to a signal |
616 | * we were prevented from freeing them at the appointed time. |
616 | * we were prevented from freeing them at the appointed time. |
617 | */ |
617 | */ |
618 | struct list_head deferred_free_list; |
618 | struct list_head deferred_free_list; |
619 | 619 | ||
620 | /** |
620 | /** |
621 | * We leave the user IRQ off as much as possible, |
621 | * We leave the user IRQ off as much as possible, |
622 | * but this means that requests will finish and never |
622 | * but this means that requests will finish and never |
623 | * be retired once the system goes idle. Set a timer to |
623 | * be retired once the system goes idle. Set a timer to |
624 | * fire periodically while the ring is running. When it |
624 | * fire periodically while the ring is running. When it |
625 | * fires, go retire requests. |
625 | * fires, go retire requests. |
626 | */ |
626 | */ |
627 | // struct delayed_work retire_work; |
627 | // struct delayed_work retire_work; |
628 | 628 | ||
629 | /** |
629 | /** |
630 | * Are we in a non-interruptible section of code like |
630 | * Are we in a non-interruptible section of code like |
631 | * modesetting? |
631 | * modesetting? |
632 | */ |
632 | */ |
633 | bool interruptible; |
633 | bool interruptible; |
634 | 634 | ||
635 | /** |
635 | /** |
636 | * Flag if the X Server, and thus DRM, is not currently in |
636 | * Flag if the X Server, and thus DRM, is not currently in |
637 | * control of the device. |
637 | * control of the device. |
638 | * |
638 | * |
639 | * This is set between LeaveVT and EnterVT. It needs to be |
639 | * This is set between LeaveVT and EnterVT. It needs to be |
640 | * replaced with a semaphore. It also needs to be |
640 | * replaced with a semaphore. It also needs to be |
641 | * transitioned away from for kernel modesetting. |
641 | * transitioned away from for kernel modesetting. |
642 | */ |
642 | */ |
643 | int suspended; |
643 | int suspended; |
644 | 644 | ||
645 | /** |
645 | /** |
646 | * Flag if the hardware appears to be wedged. |
646 | * Flag if the hardware appears to be wedged. |
647 | * |
647 | * |
648 | * This is set when attempts to idle the device timeout. |
648 | * This is set when attempts to idle the device timeout. |
649 | * It prevents command submission from occurring and makes |
649 | * It prevents command submission from occurring and makes |
650 | * every pending request fail |
650 | * every pending request fail |
651 | */ |
651 | */ |
652 | atomic_t wedged; |
652 | atomic_t wedged; |
653 | 653 | ||
654 | /** Bit 6 swizzling required for X tiling */ |
654 | /** Bit 6 swizzling required for X tiling */ |
655 | uint32_t bit_6_swizzle_x; |
655 | uint32_t bit_6_swizzle_x; |
656 | /** Bit 6 swizzling required for Y tiling */ |
656 | /** Bit 6 swizzling required for Y tiling */ |
657 | uint32_t bit_6_swizzle_y; |
657 | uint32_t bit_6_swizzle_y; |
658 | 658 | ||
659 | /* storage for physical objects */ |
659 | /* storage for physical objects */ |
660 | // struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; |
660 | // struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; |
661 | 661 | ||
662 | /* accounting, useful for userland debugging */ |
662 | /* accounting, useful for userland debugging */ |
663 | size_t gtt_total; |
663 | size_t gtt_total; |
664 | size_t mappable_gtt_total; |
664 | size_t mappable_gtt_total; |
665 | size_t object_memory; |
665 | size_t object_memory; |
666 | u32 object_count; |
666 | u32 object_count; |
667 | } mm; |
667 | } mm; |
668 | struct sdvo_device_mapping sdvo_mappings[2]; |
668 | struct sdvo_device_mapping sdvo_mappings[2]; |
669 | /* indicate whether the LVDS_BORDER should be enabled or not */ |
669 | /* indicate whether the LVDS_BORDER should be enabled or not */ |
670 | unsigned int lvds_border_bits; |
670 | unsigned int lvds_border_bits; |
671 | /* Panel fitter placement and size for Ironlake+ */ |
671 | /* Panel fitter placement and size for Ironlake+ */ |
672 | u32 pch_pf_pos, pch_pf_size; |
672 | u32 pch_pf_pos, pch_pf_size; |
673 | int panel_t3, panel_t12; |
673 | int panel_t3, panel_t12; |
674 | 674 | ||
675 | struct drm_crtc *plane_to_crtc_mapping[2]; |
675 | struct drm_crtc *plane_to_crtc_mapping[2]; |
676 | struct drm_crtc *pipe_to_crtc_mapping[2]; |
676 | struct drm_crtc *pipe_to_crtc_mapping[2]; |
677 | // wait_queue_head_t pending_flip_queue; |
677 | // wait_queue_head_t pending_flip_queue; |
678 | bool flip_pending_is_done; |
678 | bool flip_pending_is_done; |
679 | 679 | ||
680 | /* Reclocking support */ |
680 | /* Reclocking support */ |
681 | bool render_reclock_avail; |
681 | bool render_reclock_avail; |
682 | bool lvds_downclock_avail; |
682 | bool lvds_downclock_avail; |
683 | /* indicates the reduced downclock for LVDS*/ |
683 | /* indicates the reduced downclock for LVDS*/ |
684 | int lvds_downclock; |
684 | int lvds_downclock; |
685 | // struct work_struct idle_work; |
685 | // struct work_struct idle_work; |
686 | struct timer_list idle_timer; |
686 | struct timer_list idle_timer; |
687 | bool busy; |
687 | bool busy; |
688 | u16 orig_clock; |
688 | u16 orig_clock; |
689 | int child_dev_num; |
689 | int child_dev_num; |
690 | struct child_device_config *child_dev; |
690 | struct child_device_config *child_dev; |
691 | struct drm_connector *int_lvds_connector; |
691 | struct drm_connector *int_lvds_connector; |
692 | struct drm_connector *int_edp_connector; |
692 | struct drm_connector *int_edp_connector; |
693 | 693 | ||
694 | bool mchbar_need_disable; |
694 | bool mchbar_need_disable; |
695 | 695 | ||
696 | // struct work_struct rps_work; |
696 | // struct work_struct rps_work; |
697 | spinlock_t rps_lock; |
697 | spinlock_t rps_lock; |
698 | u32 pm_iir; |
698 | u32 pm_iir; |
699 | 699 | ||
700 | u8 cur_delay; |
700 | u8 cur_delay; |
701 | u8 min_delay; |
701 | u8 min_delay; |
702 | u8 max_delay; |
702 | u8 max_delay; |
703 | u8 fmax; |
703 | u8 fmax; |
704 | u8 fstart; |
704 | u8 fstart; |
705 | 705 | ||
706 | u64 last_count1; |
706 | u64 last_count1; |
707 | unsigned long last_time1; |
707 | unsigned long last_time1; |
708 | u64 last_count2; |
708 | u64 last_count2; |
709 | struct timespec last_time2; |
709 | struct timespec last_time2; |
710 | unsigned long gfx_power; |
710 | unsigned long gfx_power; |
711 | int c_m; |
711 | int c_m; |
712 | int r_t; |
712 | int r_t; |
713 | u8 corr; |
713 | u8 corr; |
714 | spinlock_t *mchdev_lock; |
714 | spinlock_t *mchdev_lock; |
715 | 715 | ||
716 | enum no_fbc_reason no_fbc_reason; |
716 | enum no_fbc_reason no_fbc_reason; |
717 | 717 | ||
718 | // struct drm_mm_node *compressed_fb; |
718 | // struct drm_mm_node *compressed_fb; |
719 | // struct drm_mm_node *compressed_llb; |
719 | // struct drm_mm_node *compressed_llb; |
720 | 720 | ||
721 | unsigned long last_gpu_reset; |
721 | unsigned long last_gpu_reset; |
722 | 722 | ||
723 | /* list of fbdev register on this device */ |
723 | /* list of fbdev register on this device */ |
724 | struct intel_fbdev *fbdev; |
724 | struct intel_fbdev *fbdev; |
725 | 725 | ||
726 | // struct backlight_device *backlight; |
726 | // struct backlight_device *backlight; |
727 | 727 | ||
728 | // struct drm_property *broadcast_rgb_property; |
728 | // struct drm_property *broadcast_rgb_property; |
729 | // struct drm_property *force_audio_property; |
729 | // struct drm_property *force_audio_property; |
730 | 730 | ||
731 | atomic_t forcewake_count; |
731 | atomic_t forcewake_count; |
732 | } drm_i915_private_t; |
732 | } drm_i915_private_t; |
733 | 733 | ||
734 | enum i915_cache_level { |
734 | enum i915_cache_level { |
735 | I915_CACHE_NONE, |
735 | I915_CACHE_NONE, |
736 | I915_CACHE_LLC, |
736 | I915_CACHE_LLC, |
737 | I915_CACHE_LLC_MLC, /* gen6+ */ |
737 | I915_CACHE_LLC_MLC, /* gen6+ */ |
738 | }; |
738 | }; |
739 | 739 | ||
740 | struct drm_i915_gem_object { |
740 | struct drm_i915_gem_object { |
741 | struct drm_gem_object base; |
741 | struct drm_gem_object base; |
742 | 742 | ||
743 | /** Current space allocated to this object in the GTT, if any. */ |
743 | /** Current space allocated to this object in the GTT, if any. */ |
744 | struct drm_mm_node *gtt_space; |
744 | struct drm_mm_node *gtt_space; |
745 | struct list_head gtt_list; |
745 | struct list_head gtt_list; |
746 | 746 | ||
747 | /** This object's place on the active/flushing/inactive lists */ |
747 | /** This object's place on the active/flushing/inactive lists */ |
748 | struct list_head ring_list; |
748 | struct list_head ring_list; |
749 | struct list_head mm_list; |
749 | struct list_head mm_list; |
750 | /** This object's place on GPU write list */ |
750 | /** This object's place on GPU write list */ |
751 | struct list_head gpu_write_list; |
751 | struct list_head gpu_write_list; |
752 | /** This object's place in the batchbuffer or on the eviction list */ |
752 | /** This object's place in the batchbuffer or on the eviction list */ |
753 | struct list_head exec_list; |
753 | struct list_head exec_list; |
754 | 754 | ||
755 | /** |
755 | /** |
756 | * This is set if the object is on the active or flushing lists |
756 | * This is set if the object is on the active or flushing lists |
757 | * (has pending rendering), and is not set if it's on inactive (ready |
757 | * (has pending rendering), and is not set if it's on inactive (ready |
758 | * to be unbound). |
758 | * to be unbound). |
759 | */ |
759 | */ |
760 | unsigned int active : 1; |
760 | unsigned int active : 1; |
761 | 761 | ||
762 | /** |
762 | /** |
763 | * This is set if the object has been written to since last bound |
763 | * This is set if the object has been written to since last bound |
764 | * to the GTT |
764 | * to the GTT |
765 | */ |
765 | */ |
766 | unsigned int dirty : 1; |
766 | unsigned int dirty : 1; |
767 | 767 | ||
768 | /** |
768 | /** |
769 | * This is set if the object has been written to since the last |
769 | * This is set if the object has been written to since the last |
770 | * GPU flush. |
770 | * GPU flush. |
771 | */ |
771 | */ |
772 | unsigned int pending_gpu_write : 1; |
772 | unsigned int pending_gpu_write : 1; |
773 | 773 | ||
774 | /** |
774 | /** |
775 | * Fence register bits (if any) for this object. Will be set |
775 | * Fence register bits (if any) for this object. Will be set |
776 | * as needed when mapped into the GTT. |
776 | * as needed when mapped into the GTT. |
777 | * Protected by dev->struct_mutex. |
777 | * Protected by dev->struct_mutex. |
778 | * |
778 | * |
779 | * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) |
779 | * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) |
780 | */ |
780 | */ |
781 | signed int fence_reg : 5; |
781 | signed int fence_reg : 5; |
782 | 782 | ||
783 | /** |
783 | /** |
784 | * Advice: are the backing pages purgeable? |
784 | * Advice: are the backing pages purgeable? |
785 | */ |
785 | */ |
786 | unsigned int madv : 2; |
786 | unsigned int madv : 2; |
787 | 787 | ||
788 | /** |
788 | /** |
789 | * Current tiling mode for the object. |
789 | * Current tiling mode for the object. |
790 | */ |
790 | */ |
791 | unsigned int tiling_mode : 2; |
791 | unsigned int tiling_mode : 2; |
792 | unsigned int tiling_changed : 1; |
792 | unsigned int tiling_changed : 1; |
793 | 793 | ||
794 | /** How many users have pinned this object in GTT space. The following |
794 | /** How many users have pinned this object in GTT space. The following |
795 | * users can each hold at most one reference: pwrite/pread, pin_ioctl |
795 | * users can each hold at most one reference: pwrite/pread, pin_ioctl |
796 | * (via user_pin_count), execbuffer (objects are not allowed multiple |
796 | * (via user_pin_count), execbuffer (objects are not allowed multiple |
797 | * times for the same batchbuffer), and the framebuffer code. When |
797 | * times for the same batchbuffer), and the framebuffer code. When |
798 | * switching/pageflipping, the framebuffer code has at most two buffers |
798 | * switching/pageflipping, the framebuffer code has at most two buffers |
799 | * pinned per crtc. |
799 | * pinned per crtc. |
800 | * |
800 | * |
801 | * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 |
801 | * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 |
802 | * bits with absolutely no headroom. So use 4 bits. */ |
802 | * bits with absolutely no headroom. So use 4 bits. */ |
803 | unsigned int pin_count : 4; |
803 | unsigned int pin_count : 4; |
804 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf |
804 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf |
805 | 805 | ||
806 | /** |
806 | /** |
807 | * Is the object at the current location in the gtt mappable and |
807 | * Is the object at the current location in the gtt mappable and |
808 | * fenceable? Used to avoid costly recalculations. |
808 | * fenceable? Used to avoid costly recalculations. |
809 | */ |
809 | */ |
810 | unsigned int map_and_fenceable : 1; |
810 | unsigned int map_and_fenceable : 1; |
811 | 811 | ||
812 | /** |
812 | /** |
813 | * Whether the current gtt mapping needs to be mappable (and isn't just |
813 | * Whether the current gtt mapping needs to be mappable (and isn't just |
814 | * mappable by accident). Track pin and fault separate for a more |
814 | * mappable by accident). Track pin and fault separate for a more |
815 | * accurate mappable working set. |
815 | * accurate mappable working set. |
816 | */ |
816 | */ |
817 | unsigned int fault_mappable : 1; |
817 | unsigned int fault_mappable : 1; |
818 | unsigned int pin_mappable : 1; |
818 | unsigned int pin_mappable : 1; |
819 | 819 | ||
820 | /* |
820 | /* |
821 | * Is the GPU currently using a fence to access this buffer, |
821 | * Is the GPU currently using a fence to access this buffer, |
822 | */ |
822 | */ |
823 | unsigned int pending_fenced_gpu_access:1; |
823 | unsigned int pending_fenced_gpu_access:1; |
824 | unsigned int fenced_gpu_access:1; |
824 | unsigned int fenced_gpu_access:1; |
825 | 825 | ||
826 | unsigned int cache_level:2; |
826 | unsigned int cache_level:2; |
827 | 827 | ||
828 | struct page **pages; |
828 | struct page **pages; |
829 | 829 | ||
830 | /** |
830 | /** |
831 | * DMAR support |
831 | * DMAR support |
832 | */ |
832 | */ |
833 | struct scatterlist *sg_list; |
833 | struct scatterlist *sg_list; |
834 | int num_sg; |
834 | int num_sg; |
835 | 835 | ||
836 | /** |
836 | /** |
837 | * Used for performing relocations during execbuffer insertion. |
837 | * Used for performing relocations during execbuffer insertion. |
838 | */ |
838 | */ |
839 | struct hlist_node exec_node; |
839 | struct hlist_node exec_node; |
840 | unsigned long exec_handle; |
840 | unsigned long exec_handle; |
841 | struct drm_i915_gem_exec_object2 *exec_entry; |
841 | struct drm_i915_gem_exec_object2 *exec_entry; |
842 | 842 | ||
843 | /** |
843 | /** |
844 | * Current offset of the object in GTT space. |
844 | * Current offset of the object in GTT space. |
845 | * |
845 | * |
846 | * This is the same as gtt_space->start |
846 | * This is the same as gtt_space->start |
847 | */ |
847 | */ |
848 | uint32_t gtt_offset; |
848 | uint32_t gtt_offset; |
849 | 849 | ||
850 | /** Breadcrumb of last rendering to the buffer. */ |
850 | /** Breadcrumb of last rendering to the buffer. */ |
851 | uint32_t last_rendering_seqno; |
851 | uint32_t last_rendering_seqno; |
852 | struct intel_ring_buffer *ring; |
852 | struct intel_ring_buffer *ring; |
853 | 853 | ||
854 | /** Breadcrumb of last fenced GPU access to the buffer. */ |
854 | /** Breadcrumb of last fenced GPU access to the buffer. */ |
855 | uint32_t last_fenced_seqno; |
855 | uint32_t last_fenced_seqno; |
856 | struct intel_ring_buffer *last_fenced_ring; |
856 | struct intel_ring_buffer *last_fenced_ring; |
857 | 857 | ||
858 | /** Current tiling stride for the object, if it's tiled. */ |
858 | /** Current tiling stride for the object, if it's tiled. */ |
859 | uint32_t stride; |
859 | uint32_t stride; |
860 | 860 | ||
861 | /** Record of address bit 17 of each page at last unbind. */ |
861 | /** Record of address bit 17 of each page at last unbind. */ |
862 | unsigned long *bit_17; |
862 | unsigned long *bit_17; |
863 | 863 | ||
864 | 864 | ||
865 | /** |
865 | /** |
866 | * If present, while GEM_DOMAIN_CPU is in the read domain this array |
866 | * If present, while GEM_DOMAIN_CPU is in the read domain this array |
867 | * flags which individual pages are valid. |
867 | * flags which individual pages are valid. |
868 | */ |
868 | */ |
869 | uint8_t *page_cpu_valid; |
869 | uint8_t *page_cpu_valid; |
870 | 870 | ||
871 | /** User space pin count and filp owning the pin */ |
871 | /** User space pin count and filp owning the pin */ |
872 | uint32_t user_pin_count; |
872 | uint32_t user_pin_count; |
873 | struct drm_file *pin_filp; |
873 | struct drm_file *pin_filp; |
874 | 874 | ||
875 | /** for phy allocated objects */ |
875 | /** for phy allocated objects */ |
876 | struct drm_i915_gem_phys_object *phys_obj; |
876 | struct drm_i915_gem_phys_object *phys_obj; |
877 | 877 | ||
878 | /** |
878 | /** |
879 | * Number of crtcs where this object is currently the fb, but |
879 | * Number of crtcs where this object is currently the fb, but |
880 | * will be page flipped away on the next vblank. When it |
880 | * will be page flipped away on the next vblank. When it |
881 | * reaches 0, dev_priv->pending_flip_queue will be woken up. |
881 | * reaches 0, dev_priv->pending_flip_queue will be woken up. |
882 | */ |
882 | */ |
883 | atomic_t pending_flip; |
883 | atomic_t pending_flip; |
884 | }; |
884 | }; |
885 | 885 | ||
886 | 886 | ||
887 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
887 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
888 | 888 | ||
889 | /** |
889 | /** |
890 | * Request queue structure. |
890 | * Request queue structure. |
891 | * |
891 | * |
892 | * The request queue allows us to note sequence numbers that have been emitted |
892 | * The request queue allows us to note sequence numbers that have been emitted |
893 | * and may be associated with active buffers to be retired. |
893 | * and may be associated with active buffers to be retired. |
894 | * |
894 | * |
895 | * By keeping this list, we can avoid having to do questionable |
895 | * By keeping this list, we can avoid having to do questionable |
896 | * sequence-number comparisons on buffer last_rendering_seqnos, and associate |
896 | * sequence-number comparisons on buffer last_rendering_seqnos, and associate |
897 | * an emission time with seqnos for tracking how far ahead of the GPU we are. |
897 | * an emission time with seqnos for tracking how far ahead of the GPU we are. |
898 | */ |
898 | */ |
899 | struct drm_i915_gem_request { |
899 | struct drm_i915_gem_request { |
900 | /** On Which ring this request was generated */ |
900 | /** On Which ring this request was generated */ |
901 | struct intel_ring_buffer *ring; |
901 | struct intel_ring_buffer *ring; |
902 | 902 | ||
903 | /** GEM sequence number associated with this request. */ |
903 | /** GEM sequence number associated with this request. */ |
904 | uint32_t seqno; |
904 | uint32_t seqno; |
905 | 905 | ||
906 | /** Time at which this request was emitted, in jiffies. */ |
906 | /** Time at which this request was emitted, in jiffies. */ |
907 | unsigned long emitted_jiffies; |
907 | unsigned long emitted_jiffies; |
908 | 908 | ||
909 | /** global list entry for this request */ |
909 | /** global list entry for this request */ |
910 | struct list_head list; |
910 | struct list_head list; |
911 | 911 | ||
912 | struct drm_i915_file_private *file_priv; |
912 | struct drm_i915_file_private *file_priv; |
913 | /** file_priv list entry for this request */ |
913 | /** file_priv list entry for this request */ |
914 | struct list_head client_list; |
914 | struct list_head client_list; |
915 | }; |
915 | }; |
916 | 916 | ||
917 | struct drm_i915_file_private { |
917 | struct drm_i915_file_private { |
918 | struct { |
918 | struct { |
919 | // struct spinlock lock; |
919 | // struct spinlock lock; |
920 | struct list_head request_list; |
920 | struct list_head request_list; |
921 | } mm; |
921 | } mm; |
922 | }; |
922 | }; |
923 | 923 | ||
924 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) |
924 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) |
925 | 925 | ||
926 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
926 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
927 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
927 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
928 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
928 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
929 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) |
929 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) |
930 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
930 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
931 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) |
931 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) |
932 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) |
932 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) |
933 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) |
933 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) |
934 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) |
934 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) |
935 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) |
935 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) |
936 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) |
936 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) |
937 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) |
937 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) |
938 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) |
938 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) |
939 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) |
939 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) |
940 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) |
940 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) |
941 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) |
941 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) |
942 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) |
942 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) |
943 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
943 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
944 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
944 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
945 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
945 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
946 | 946 | ||
947 | /* |
947 | /* |
948 | * The genX designation typically refers to the render engine, so render |
948 | * The genX designation typically refers to the render engine, so render |
949 | * capability related checks should use IS_GEN, while display and other checks |
949 | * capability related checks should use IS_GEN, while display and other checks |
950 | * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular |
950 | * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular |
951 | * chips, etc.). |
951 | * chips, etc.). |
952 | */ |
952 | */ |
953 | #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) |
953 | #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) |
954 | #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) |
954 | #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) |
955 | #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) |
955 | #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) |
956 | #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) |
956 | #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) |
957 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) |
957 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) |
958 | #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) |
958 | #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) |
959 | 959 | ||
960 | #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) |
960 | #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) |
961 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) |
961 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) |
962 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
962 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
963 | 963 | ||
964 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
964 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
965 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
965 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
966 | 966 | ||
967 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
967 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
968 | * rows, which changed the alignment requirements and fence programming. |
968 | * rows, which changed the alignment requirements and fence programming. |
969 | */ |
969 | */ |
970 | #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ |
970 | #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ |
971 | IS_I915GM(dev))) |
971 | IS_I915GM(dev))) |
972 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) |
972 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) |
973 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
973 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
974 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
974 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
975 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) |
975 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) |
976 | #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) |
976 | #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) |
977 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) |
977 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) |
978 | /* dsparb controlled by hw only */ |
978 | /* dsparb controlled by hw only */ |
979 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
979 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
980 | 980 | ||
981 | #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) |
981 | #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) |
982 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) |
982 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) |
983 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) |
983 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) |
984 | 984 | ||
985 | #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) |
985 | #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) |
986 | #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) |
986 | #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) |
987 | 987 | ||
988 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) |
988 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) |
989 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
989 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
990 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
990 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
991 | 991 | ||
992 | //#include "i915_trace.h" |
992 | //#include "i915_trace.h" |
993 | 993 | ||
994 | extern int i915_max_ioctl; |
994 | extern int i915_max_ioctl; |
995 | extern unsigned int i915_fbpercrtc; |
995 | extern unsigned int i915_fbpercrtc; |
996 | extern int i915_panel_ignore_lid; |
996 | extern int i915_panel_ignore_lid; |
997 | extern unsigned int i915_powersave; |
997 | extern unsigned int i915_powersave; |
998 | extern unsigned int i915_semaphores; |
998 | extern unsigned int i915_semaphores; |
999 | extern unsigned int i915_lvds_downclock; |
999 | extern unsigned int i915_lvds_downclock; |
1000 | extern unsigned int i915_panel_use_ssc; |
1000 | extern unsigned int i915_panel_use_ssc; |
1001 | extern int i915_vbt_sdvo_panel_type; |
1001 | extern int i915_vbt_sdvo_panel_type; |
1002 | extern unsigned int i915_enable_rc6; |
1002 | extern unsigned int i915_enable_rc6; |
1003 | extern unsigned int i915_enable_fbc; |
1003 | extern unsigned int i915_enable_fbc; |
1004 | extern bool i915_enable_hangcheck; |
1004 | extern bool i915_enable_hangcheck; |
1005 | 1005 | ||
1006 | extern int i915_resume(struct drm_device *dev); |
1006 | extern int i915_resume(struct drm_device *dev); |
1007 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
1007 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
1008 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
1008 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
1009 | 1009 | ||
1010 | /* i915_dma.c */ |
1010 | /* i915_dma.c */ |
1011 | extern void i915_kernel_lost_context(struct drm_device * dev); |
1011 | extern void i915_kernel_lost_context(struct drm_device * dev); |
1012 | extern int i915_driver_load(struct drm_device *, unsigned long flags); |
1012 | extern int i915_driver_load(struct drm_device *, unsigned long flags); |
1013 | extern int i915_driver_unload(struct drm_device *); |
1013 | extern int i915_driver_unload(struct drm_device *); |
1014 | extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); |
1014 | extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); |
1015 | extern void i915_driver_lastclose(struct drm_device * dev); |
1015 | extern void i915_driver_lastclose(struct drm_device * dev); |
1016 | extern void i915_driver_preclose(struct drm_device *dev, |
1016 | extern void i915_driver_preclose(struct drm_device *dev, |
1017 | struct drm_file *file_priv); |
1017 | struct drm_file *file_priv); |
1018 | extern void i915_driver_postclose(struct drm_device *dev, |
1018 | extern void i915_driver_postclose(struct drm_device *dev, |
1019 | struct drm_file *file_priv); |
1019 | struct drm_file *file_priv); |
1020 | extern int i915_driver_device_is_agp(struct drm_device * dev); |
1020 | extern int i915_driver_device_is_agp(struct drm_device * dev); |
1021 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
1021 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
1022 | unsigned long arg); |
1022 | unsigned long arg); |
1023 | extern int i915_emit_box(struct drm_device *dev, |
1023 | extern int i915_emit_box(struct drm_device *dev, |
1024 | struct drm_clip_rect *box, |
1024 | struct drm_clip_rect *box, |
1025 | int DR1, int DR4); |
1025 | int DR1, int DR4); |
1026 | extern int i915_reset(struct drm_device *dev, u8 flags); |
1026 | extern int i915_reset(struct drm_device *dev, u8 flags); |
1027 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
1027 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
1028 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
1028 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
1029 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
1029 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
1030 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); |
1030 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); |
1031 | 1031 | ||
1032 | 1032 | ||
1033 | /* i915_irq.c */ |
1033 | /* i915_irq.c */ |
1034 | void i915_hangcheck_elapsed(unsigned long data); |
1034 | void i915_hangcheck_elapsed(unsigned long data); |
1035 | void i915_handle_error(struct drm_device *dev, bool wedged); |
1035 | void i915_handle_error(struct drm_device *dev, bool wedged); |
1036 | extern int i915_irq_emit(struct drm_device *dev, void *data, |
1036 | extern int i915_irq_emit(struct drm_device *dev, void *data, |
1037 | struct drm_file *file_priv); |
1037 | struct drm_file *file_priv); |
1038 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
1038 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
1039 | struct drm_file *file_priv); |
1039 | struct drm_file *file_priv); |
1040 | 1040 | ||
1041 | extern void intel_irq_init(struct drm_device *dev); |
1041 | extern void intel_irq_init(struct drm_device *dev); |
1042 | 1042 | ||
1043 | extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, |
1043 | extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, |
1044 | struct drm_file *file_priv); |
1044 | struct drm_file *file_priv); |
1045 | extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, |
1045 | extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, |
1046 | struct drm_file *file_priv); |
1046 | struct drm_file *file_priv); |
1047 | extern int i915_vblank_swap(struct drm_device *dev, void *data, |
1047 | extern int i915_vblank_swap(struct drm_device *dev, void *data, |
1048 | struct drm_file *file_priv); |
1048 | struct drm_file *file_priv); |
1049 | 1049 | ||
1050 | void |
1050 | void |
1051 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
1051 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
1052 | 1052 | ||
1053 | void |
1053 | void |
1054 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
1054 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
1055 | 1055 | ||
1056 | void intel_enable_asle (struct drm_device *dev); |
1056 | void intel_enable_asle (struct drm_device *dev); |
1057 | 1057 | ||
1058 | #ifdef CONFIG_DEBUG_FS |
1058 | #ifdef CONFIG_DEBUG_FS |
1059 | extern void i915_destroy_error_state(struct drm_device *dev); |
1059 | extern void i915_destroy_error_state(struct drm_device *dev); |
1060 | #else |
1060 | #else |
1061 | #define i915_destroy_error_state(x) |
1061 | #define i915_destroy_error_state(x) |
1062 | #endif |
1062 | #endif |
1063 | 1063 | ||
1064 | 1064 | ||
1065 | /* i915_mem.c */ |
1065 | /* i915_mem.c */ |
1066 | extern int i915_mem_alloc(struct drm_device *dev, void *data, |
1066 | extern int i915_mem_alloc(struct drm_device *dev, void *data, |
1067 | struct drm_file *file_priv); |
1067 | struct drm_file *file_priv); |
1068 | extern int i915_mem_free(struct drm_device *dev, void *data, |
1068 | extern int i915_mem_free(struct drm_device *dev, void *data, |
1069 | struct drm_file *file_priv); |
1069 | struct drm_file *file_priv); |
1070 | extern int i915_mem_init_heap(struct drm_device *dev, void *data, |
1070 | extern int i915_mem_init_heap(struct drm_device *dev, void *data, |
1071 | struct drm_file *file_priv); |
1071 | struct drm_file *file_priv); |
1072 | extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, |
1072 | extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, |
1073 | struct drm_file *file_priv); |
1073 | struct drm_file *file_priv); |
1074 | extern void i915_mem_takedown(struct mem_block **heap); |
1074 | extern void i915_mem_takedown(struct mem_block **heap); |
1075 | extern void i915_mem_release(struct drm_device * dev, |
1075 | extern void i915_mem_release(struct drm_device * dev, |
1076 | struct drm_file *file_priv, struct mem_block *heap); |
1076 | struct drm_file *file_priv, struct mem_block *heap); |
1077 | /* i915_gem.c */ |
1077 | /* i915_gem.c */ |
1078 | int i915_gem_init_ioctl(struct drm_device *dev, void *data, |
1078 | int i915_gem_init_ioctl(struct drm_device *dev, void *data, |
1079 | struct drm_file *file_priv); |
1079 | struct drm_file *file_priv); |
1080 | int i915_gem_create_ioctl(struct drm_device *dev, void *data, |
1080 | int i915_gem_create_ioctl(struct drm_device *dev, void *data, |
1081 | struct drm_file *file_priv); |
1081 | struct drm_file *file_priv); |
1082 | int i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
1082 | int i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
1083 | struct drm_file *file_priv); |
1083 | struct drm_file *file_priv); |
1084 | int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
1084 | int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
1085 | struct drm_file *file_priv); |
1085 | struct drm_file *file_priv); |
1086 | int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
1086 | int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
1087 | struct drm_file *file_priv); |
1087 | struct drm_file *file_priv); |
1088 | int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, |
1088 | int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, |
1089 | struct drm_file *file_priv); |
1089 | struct drm_file *file_priv); |
1090 | int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
1090 | int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
1091 | struct drm_file *file_priv); |
1091 | struct drm_file *file_priv); |
1092 | int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
1092 | int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
1093 | struct drm_file *file_priv); |
1093 | struct drm_file *file_priv); |
1094 | int i915_gem_execbuffer(struct drm_device *dev, void *data, |
1094 | int i915_gem_execbuffer(struct drm_device *dev, void *data, |
1095 | struct drm_file *file_priv); |
1095 | struct drm_file *file_priv); |
1096 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, |
1096 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, |
1097 | struct drm_file *file_priv); |
1097 | struct drm_file *file_priv); |
1098 | int i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
1098 | int i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
1099 | struct drm_file *file_priv); |
1099 | struct drm_file *file_priv); |
1100 | int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
1100 | int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
1101 | struct drm_file *file_priv); |
1101 | struct drm_file *file_priv); |
1102 | int i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
1102 | int i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
1103 | struct drm_file *file_priv); |
1103 | struct drm_file *file_priv); |
1104 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
1104 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
1105 | struct drm_file *file_priv); |
1105 | struct drm_file *file_priv); |
1106 | int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
1106 | int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
1107 | struct drm_file *file_priv); |
1107 | struct drm_file *file_priv); |
1108 | int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, |
1108 | int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, |
1109 | struct drm_file *file_priv); |
1109 | struct drm_file *file_priv); |
1110 | int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
1110 | int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
1111 | struct drm_file *file_priv); |
1111 | struct drm_file *file_priv); |
1112 | int i915_gem_set_tiling(struct drm_device *dev, void *data, |
1112 | int i915_gem_set_tiling(struct drm_device *dev, void *data, |
1113 | struct drm_file *file_priv); |
1113 | struct drm_file *file_priv); |
1114 | int i915_gem_get_tiling(struct drm_device *dev, void *data, |
1114 | int i915_gem_get_tiling(struct drm_device *dev, void *data, |
1115 | struct drm_file *file_priv); |
1115 | struct drm_file *file_priv); |
1116 | int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
1116 | int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
1117 | struct drm_file *file_priv); |
1117 | struct drm_file *file_priv); |
1118 | void i915_gem_load(struct drm_device *dev); |
1118 | void i915_gem_load(struct drm_device *dev); |
1119 | int i915_gem_init_object(struct drm_gem_object *obj); |
1119 | int i915_gem_init_object(struct drm_gem_object *obj); |
1120 | int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, |
1120 | int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, |
1121 | uint32_t invalidate_domains, |
1121 | uint32_t invalidate_domains, |
1122 | uint32_t flush_domains); |
1122 | uint32_t flush_domains); |
1123 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
1123 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
1124 | size_t size); |
1124 | size_t size); |
1125 | void i915_gem_free_object(struct drm_gem_object *obj); |
1125 | void i915_gem_free_object(struct drm_gem_object *obj); |
1126 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
1126 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
1127 | uint32_t alignment, |
1127 | uint32_t alignment, |
1128 | bool map_and_fenceable); |
1128 | bool map_and_fenceable); |
1129 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
1129 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
1130 | int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); |
1130 | int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); |
1131 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
1131 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
1132 | void i915_gem_lastclose(struct drm_device *dev); |
1132 | void i915_gem_lastclose(struct drm_device *dev); |
1133 | 1133 | ||
1134 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
1134 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
1135 | int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); |
1135 | int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); |
1136 | void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
1136 | void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
1137 | struct intel_ring_buffer *ring, |
1137 | struct intel_ring_buffer *ring, |
1138 | u32 seqno); |
1138 | u32 seqno); |
1139 | 1139 | ||
1140 | int i915_gem_dumb_create(struct drm_file *file_priv, |
1140 | int i915_gem_dumb_create(struct drm_file *file_priv, |
1141 | struct drm_device *dev, |
1141 | struct drm_device *dev, |
1142 | struct drm_mode_create_dumb *args); |
1142 | struct drm_mode_create_dumb *args); |
1143 | int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, |
1143 | int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, |
1144 | uint32_t handle, uint64_t *offset); |
1144 | uint32_t handle, uint64_t *offset); |
1145 | int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, |
1145 | int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, |
1146 | uint32_t handle); |
1146 | uint32_t handle); |
1147 | /** |
1147 | /** |
1148 | * Returns true if seq1 is later than seq2. |
1148 | * Returns true if seq1 is later than seq2. |
1149 | */ |
1149 | */ |
1150 | //static inline bool |
1150 | //static inline bool |
1151 | //i915_seqno_passed(uint32_t seq1, uint32_t seq2) |
1151 | //i915_seqno_passed(uint32_t seq1, uint32_t seq2) |
1152 | //{ |
1152 | //{ |
1153 | // return (int32_t)(seq1 - seq2) >= 0; |
1153 | // return (int32_t)(seq1 - seq2) >= 0; |
1154 | //} |
1154 | //} |
1155 | 1155 | ||
1156 | static inline u32 |
1156 | static inline u32 |
1157 | i915_gem_next_request_seqno(struct intel_ring_buffer *ring) |
1157 | i915_gem_next_request_seqno(struct intel_ring_buffer *ring) |
1158 | { |
1158 | { |
1159 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1159 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1160 | return ring->outstanding_lazy_request = dev_priv->next_seqno; |
1160 | return ring->outstanding_lazy_request = dev_priv->next_seqno; |
1161 | } |
1161 | } |
1162 | 1162 | ||
1163 | 1163 | ||
1164 | void i915_gem_retire_requests(struct drm_device *dev); |
1164 | void i915_gem_retire_requests(struct drm_device *dev); |
1165 | void i915_gem_reset(struct drm_device *dev); |
1165 | void i915_gem_reset(struct drm_device *dev); |
1166 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
1166 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
1167 | int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
1167 | int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
1168 | uint32_t read_domains, |
1168 | uint32_t read_domains, |
1169 | uint32_t write_domain); |
1169 | uint32_t write_domain); |
1170 | int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); |
1170 | int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); |
1171 | int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); |
1171 | int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); |
1172 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
1172 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
1173 | void i915_gem_do_init(struct drm_device *dev, |
1173 | void i915_gem_do_init(struct drm_device *dev, |
1174 | unsigned long start, |
1174 | unsigned long start, |
1175 | unsigned long mappable_end, |
1175 | unsigned long mappable_end, |
1176 | unsigned long end); |
1176 | unsigned long end); |
1177 | int __must_check i915_gpu_idle(struct drm_device *dev); |
1177 | int __must_check i915_gpu_idle(struct drm_device *dev); |
1178 | int __must_check i915_gem_idle(struct drm_device *dev); |
1178 | int __must_check i915_gem_idle(struct drm_device *dev); |
1179 | int __must_check i915_add_request(struct intel_ring_buffer *ring, |
1179 | int __must_check i915_add_request(struct intel_ring_buffer *ring, |
1180 | struct drm_file *file, |
1180 | struct drm_file *file, |
1181 | struct drm_i915_gem_request *request); |
1181 | struct drm_i915_gem_request *request); |
1182 | int __must_check i915_wait_request(struct intel_ring_buffer *ring, |
1182 | int __must_check i915_wait_request(struct intel_ring_buffer *ring, |
1183 | uint32_t seqno); |
1183 | uint32_t seqno); |
1184 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
1184 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
1185 | int __must_check |
1185 | int __must_check |
1186 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
1186 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
1187 | bool write); |
1187 | bool write); |
1188 | int __must_check |
1188 | int __must_check |
1189 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
1189 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
1190 | u32 alignment, |
1190 | u32 alignment, |
1191 | struct intel_ring_buffer *pipelined); |
1191 | struct intel_ring_buffer *pipelined); |
1192 | int i915_gem_attach_phys_object(struct drm_device *dev, |
1192 | int i915_gem_attach_phys_object(struct drm_device *dev, |
1193 | struct drm_i915_gem_object *obj, |
1193 | struct drm_i915_gem_object *obj, |
1194 | int id, |
1194 | int id, |
1195 | int align); |
1195 | int align); |
1196 | void i915_gem_detach_phys_object(struct drm_device *dev, |
1196 | void i915_gem_detach_phys_object(struct drm_device *dev, |
1197 | struct drm_i915_gem_object *obj); |
1197 | struct drm_i915_gem_object *obj); |
1198 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
1198 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
1199 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1199 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1200 | 1200 | ||
1201 | uint32_t |
1201 | uint32_t |
1202 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, |
1202 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, |
1203 | uint32_t size, |
1203 | uint32_t size, |
1204 | int tiling_mode); |
1204 | int tiling_mode); |
1205 | 1205 | ||
1206 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
1206 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
1207 | enum i915_cache_level cache_level); |
1207 | enum i915_cache_level cache_level); |
1208 | 1208 | ||
1209 | 1209 | ||
1210 | /* i915_gem_gtt.c */ |
1210 | /* i915_gem_gtt.c */ |
1211 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1211 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1212 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
1212 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
1213 | void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, |
1213 | void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, |
1214 | enum i915_cache_level cache_level); |
1214 | enum i915_cache_level cache_level); |
1215 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); |
1215 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); |
1216 | 1216 | ||
1217 | /* i915_gem_evict.c */ |
1217 | /* i915_gem_evict.c */ |
1218 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, |
1218 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, |
1219 | unsigned alignment, bool mappable); |
1219 | unsigned alignment, bool mappable); |
1220 | int __must_check i915_gem_evict_everything(struct drm_device *dev, |
1220 | int __must_check i915_gem_evict_everything(struct drm_device *dev, |
1221 | bool purgeable_only); |
1221 | bool purgeable_only); |
1222 | int __must_check i915_gem_evict_inactive(struct drm_device *dev, |
1222 | int __must_check i915_gem_evict_inactive(struct drm_device *dev, |
1223 | bool purgeable_only); |
1223 | bool purgeable_only); |
1224 | 1224 | ||
1225 | /* i915_gem_tiling.c */ |
1225 | /* i915_gem_tiling.c */ |
1226 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
1226 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
1227 | void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1227 | void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1228 | void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1228 | void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1229 | 1229 | ||
1230 | /* i915_gem_debug.c */ |
1230 | /* i915_gem_debug.c */ |
1231 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, |
1231 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, |
1232 | const char *where, uint32_t mark); |
1232 | const char *where, uint32_t mark); |
1233 | #if WATCH_LISTS |
1233 | #if WATCH_LISTS |
1234 | int i915_verify_lists(struct drm_device *dev); |
1234 | int i915_verify_lists(struct drm_device *dev); |
1235 | #else |
1235 | #else |
1236 | #define i915_verify_lists(dev) 0 |
1236 | #define i915_verify_lists(dev) 0 |
1237 | #endif |
1237 | #endif |
1238 | void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, |
1238 | void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, |
1239 | int handle); |
1239 | int handle); |
1240 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, |
1240 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, |
1241 | const char *where, uint32_t mark); |
1241 | const char *where, uint32_t mark); |
1242 | 1242 | ||
1243 | /* i915_debugfs.c */ |
1243 | /* i915_debugfs.c */ |
1244 | int i915_debugfs_init(struct drm_minor *minor); |
1244 | int i915_debugfs_init(struct drm_minor *minor); |
1245 | void i915_debugfs_cleanup(struct drm_minor *minor); |
1245 | void i915_debugfs_cleanup(struct drm_minor *minor); |
1246 | 1246 | ||
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
1255 | /* intel_i2c.c */ |
1255 | /* intel_i2c.c */ |
1256 | extern int intel_setup_gmbus(struct drm_device *dev); |
1256 | extern int intel_setup_gmbus(struct drm_device *dev); |
1257 | extern void intel_teardown_gmbus(struct drm_device *dev); |
1257 | extern void intel_teardown_gmbus(struct drm_device *dev); |
1258 | extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); |
1258 | extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); |
1259 | extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); |
1259 | extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); |
1260 | 1260 | ||
1261 | //extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) |
1261 | //extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) |
1262 | //{ |
1262 | //{ |
1263 | // return container_of(adapter, struct intel_gmbus, adapter)->force_bit; |
1263 | // return container_of(adapter, struct intel_gmbus, adapter)->force_bit; |
1264 | //} |
1264 | //} |
1265 | 1265 | ||
1266 | extern void intel_i2c_reset(struct drm_device *dev); |
1266 | extern void intel_i2c_reset(struct drm_device *dev); |
1267 | 1267 | ||
1268 | /* intel_opregion.c */ |
1268 | /* intel_opregion.c */ |
1269 | extern int intel_opregion_setup(struct drm_device *dev); |
1269 | extern int intel_opregion_setup(struct drm_device *dev); |
1270 | #ifdef CONFIG_ACPI |
1270 | #ifdef CONFIG_ACPI |
1271 | extern void intel_opregion_init(struct drm_device *dev); |
1271 | extern void intel_opregion_init(struct drm_device *dev); |
1272 | extern void intel_opregion_fini(struct drm_device *dev); |
1272 | extern void intel_opregion_fini(struct drm_device *dev); |
1273 | extern void intel_opregion_asle_intr(struct drm_device *dev); |
1273 | extern void intel_opregion_asle_intr(struct drm_device *dev); |
1274 | extern void intel_opregion_gse_intr(struct drm_device *dev); |
1274 | extern void intel_opregion_gse_intr(struct drm_device *dev); |
1275 | extern void intel_opregion_enable_asle(struct drm_device *dev); |
1275 | extern void intel_opregion_enable_asle(struct drm_device *dev); |
1276 | #else |
1276 | #else |
1277 | static inline void intel_opregion_init(struct drm_device *dev) { return; } |
1277 | static inline void intel_opregion_init(struct drm_device *dev) { return; } |
1278 | static inline void intel_opregion_fini(struct drm_device *dev) { return; } |
1278 | static inline void intel_opregion_fini(struct drm_device *dev) { return; } |
1279 | static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } |
1279 | static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } |
1280 | static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } |
1280 | static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } |
1281 | static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } |
1281 | static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } |
1282 | #endif |
1282 | #endif |
1283 | 1283 | ||
1284 | /* intel_acpi.c */ |
1284 | /* intel_acpi.c */ |
1285 | #ifdef CONFIG_ACPI |
1285 | #ifdef CONFIG_ACPI |
1286 | extern void intel_register_dsm_handler(void); |
1286 | extern void intel_register_dsm_handler(void); |
1287 | extern void intel_unregister_dsm_handler(void); |
1287 | extern void intel_unregister_dsm_handler(void); |
1288 | #else |
1288 | #else |
1289 | static inline void intel_register_dsm_handler(void) { return; } |
1289 | static inline void intel_register_dsm_handler(void) { return; } |
1290 | static inline void intel_unregister_dsm_handler(void) { return; } |
1290 | static inline void intel_unregister_dsm_handler(void) { return; } |
1291 | #endif /* CONFIG_ACPI */ |
1291 | #endif /* CONFIG_ACPI */ |
1292 | 1292 | ||
1293 | /* modesetting */ |
1293 | /* modesetting */ |
1294 | extern void intel_modeset_init(struct drm_device *dev); |
1294 | extern void intel_modeset_init(struct drm_device *dev); |
1295 | extern void intel_modeset_gem_init(struct drm_device *dev); |
1295 | extern void intel_modeset_gem_init(struct drm_device *dev); |
1296 | extern void intel_modeset_cleanup(struct drm_device *dev); |
1296 | extern void intel_modeset_cleanup(struct drm_device *dev); |
1297 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
1297 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
1298 | extern bool intel_fbc_enabled(struct drm_device *dev); |
1298 | extern bool intel_fbc_enabled(struct drm_device *dev); |
1299 | extern void intel_disable_fbc(struct drm_device *dev); |
1299 | extern void intel_disable_fbc(struct drm_device *dev); |
1300 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1300 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1301 | extern void ironlake_enable_rc6(struct drm_device *dev); |
1301 | extern void ironlake_enable_rc6(struct drm_device *dev); |
1302 | extern void gen6_set_rps(struct drm_device *dev, u8 val); |
1302 | extern void gen6_set_rps(struct drm_device *dev, u8 val); |
1303 | extern void intel_detect_pch (struct drm_device *dev); |
1303 | extern void intel_detect_pch (struct drm_device *dev); |
1304 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); |
1304 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); |
1305 | 1305 | ||
1306 | /* overlay */ |
1306 | /* overlay */ |
1307 | #ifdef CONFIG_DEBUG_FS |
1307 | #ifdef CONFIG_DEBUG_FS |
1308 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); |
1308 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); |
1309 | extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); |
1309 | extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); |
1310 | 1310 | ||
1311 | extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); |
1311 | extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); |
1312 | extern void intel_display_print_error_state(struct seq_file *m, |
1312 | extern void intel_display_print_error_state(struct seq_file *m, |
1313 | struct drm_device *dev, |
1313 | struct drm_device *dev, |
1314 | struct intel_display_error_state *error); |
1314 | struct intel_display_error_state *error); |
1315 | #endif |
1315 | #endif |
1316 | 1316 | ||
1317 | #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) |
1317 | #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) |
1318 | 1318 | ||
1319 | #define BEGIN_LP_RING(n) \ |
1319 | #define BEGIN_LP_RING(n) \ |
1320 | intel_ring_begin(LP_RING(dev_priv), (n)) |
1320 | intel_ring_begin(LP_RING(dev_priv), (n)) |
1321 | 1321 | ||
1322 | #define OUT_RING(x) \ |
1322 | #define OUT_RING(x) \ |
1323 | intel_ring_emit(LP_RING(dev_priv), x) |
1323 | intel_ring_emit(LP_RING(dev_priv), x) |
1324 | 1324 | ||
1325 | #define ADVANCE_LP_RING() \ |
1325 | #define ADVANCE_LP_RING() \ |
1326 | intel_ring_advance(LP_RING(dev_priv)) |
1326 | intel_ring_advance(LP_RING(dev_priv)) |
1327 | 1327 | ||
1328 | /** |
1328 | /** |
1329 | * Lock test for when it's just for synchronization of ring access. |
1329 | * Lock test for when it's just for synchronization of ring access. |
1330 | * |
1330 | * |
1331 | * In that case, we don't need to do it when GEM is initialized as nobody else |
1331 | * In that case, we don't need to do it when GEM is initialized as nobody else |
1332 | * has access to the ring. |
1332 | * has access to the ring. |
1333 | */ |
1333 | */ |
1334 | #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ |
1334 | #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ |
1335 | if (LP_RING(dev->dev_private)->obj == NULL) \ |
1335 | if (LP_RING(dev->dev_private)->obj == NULL) \ |
1336 | LOCK_TEST_WITH_RETURN(dev, file); \ |
1336 | LOCK_TEST_WITH_RETURN(dev, file); \ |
1337 | } while (0) |
1337 | } while (0) |
1338 | 1338 | ||
1339 | /* On SNB platform, before reading ring registers forcewake bit |
1339 | /* On SNB platform, before reading ring registers forcewake bit |
1340 | * must be set to prevent GT core from power down and stale values being |
1340 | * must be set to prevent GT core from power down and stale values being |
1341 | * returned. |
1341 | * returned. |
1342 | */ |
1342 | */ |
1343 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
1343 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
1344 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
1344 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
1345 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
1345 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
1346 | 1346 | ||
1347 | /* We give fast paths for the really cool registers */ |
1347 | /* We give fast paths for the really cool registers */ |
1348 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
1348 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
1349 | (((dev_priv)->info->gen >= 6) && \ |
1349 | (((dev_priv)->info->gen >= 6) && \ |
1350 | ((reg) < 0x40000) && \ |
1350 | ((reg) < 0x40000) && \ |
1351 | ((reg) != FORCEWAKE)) |
1351 | ((reg) != FORCEWAKE)) |
1352 | 1352 | ||
1353 | #define __i915_read(x, y) \ |
1353 | #define __i915_read(x, y) \ |
1354 | static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
1354 | static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
1355 | u##x val = 0; \ |
1355 | u##x val = 0; \ |
1356 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1356 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1357 | gen6_gt_force_wake_get(dev_priv); \ |
1357 | gen6_gt_force_wake_get(dev_priv); \ |
1358 | val = read##y(dev_priv->regs + reg); \ |
1358 | val = read##y(dev_priv->regs + reg); \ |
1359 | gen6_gt_force_wake_put(dev_priv); \ |
1359 | gen6_gt_force_wake_put(dev_priv); \ |
1360 | } else { \ |
1360 | } else { \ |
1361 | val = read##y(dev_priv->regs + reg); \ |
1361 | val = read##y(dev_priv->regs + reg); \ |
1362 | } \ |
1362 | } \ |
1363 | /* trace_i915_reg_rw(false, reg, val, sizeof(val)); */\ |
1363 | /* trace_i915_reg_rw(false, reg, val, sizeof(val)); */\ |
1364 | return val; \ |
1364 | return val; \ |
1365 | } |
1365 | } |
1366 | 1366 | ||
1367 | __i915_read(8, b) |
1367 | __i915_read(8, b) |
1368 | __i915_read(16, w) |
1368 | __i915_read(16, w) |
1369 | __i915_read(32, l) |
1369 | __i915_read(32, l) |
1370 | __i915_read(64, q) |
1370 | __i915_read(64, q) |
1371 | #undef __i915_read |
1371 | #undef __i915_read |
1372 | 1372 | ||
1373 | #define __i915_write(x, y) \ |
1373 | #define __i915_write(x, y) \ |
1374 | static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ |
1374 | static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ |
1375 | /* trace_i915_reg_rw(true, reg, val, sizeof(val));*/ \ |
1375 | /* trace_i915_reg_rw(true, reg, val, sizeof(val));*/ \ |
1376 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1376 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1377 | __gen6_gt_wait_for_fifo(dev_priv); \ |
1377 | __gen6_gt_wait_for_fifo(dev_priv); \ |
1378 | } \ |
1378 | } \ |
1379 | write##y(val, dev_priv->regs + reg); \ |
1379 | write##y(val, dev_priv->regs + reg); \ |
1380 | } |
1380 | } |
1381 | __i915_write(8, b) |
1381 | __i915_write(8, b) |
1382 | __i915_write(16, w) |
1382 | __i915_write(16, w) |
1383 | __i915_write(32, l) |
1383 | __i915_write(32, l) |
1384 | __i915_write(64, q) |
1384 | __i915_write(64, q) |
1385 | #undef __i915_write |
1385 | #undef __i915_write |
1386 | 1386 | ||
1387 | #define I915_READ8(reg) i915_read8(dev_priv, (reg)) |
1387 | #define I915_READ8(reg) i915_read8(dev_priv, (reg)) |
1388 | #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) |
1388 | #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) |
1389 | 1389 | ||
1390 | #define I915_READ16(reg) i915_read16(dev_priv, (reg)) |
1390 | #define I915_READ16(reg) i915_read16(dev_priv, (reg)) |
1391 | #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) |
1391 | #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) |
1392 | #define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) |
1392 | #define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) |
1393 | #define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) |
1393 | #define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) |
1394 | 1394 | ||
1395 | #define I915_READ(reg) i915_read32(dev_priv, (reg)) |
1395 | #define I915_READ(reg) i915_read32(dev_priv, (reg)) |
1396 | #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) |
1396 | #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) |
1397 | #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) |
1397 | #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) |
1398 | #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) |
1398 | #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) |
1399 | 1399 | ||
1400 | #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) |
1400 | #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) |
1401 | #define I915_READ64(reg) i915_read64(dev_priv, (reg)) |
1401 | #define I915_READ64(reg) i915_read64(dev_priv, (reg)) |
1402 | 1402 | ||
1403 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
1403 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
1404 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
1404 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
- | 1405 | ||
- | 1406 | typedef struct |
|
- | 1407 | { |
|
- | 1408 | int width; |
|
- | 1409 | int height; |
|
- | 1410 | int bpp; |
|
- | 1411 | int freq; |
|
1405 | 1412 | }videomode_t; |
|
1406 | 1413 | ||
1407 | #endif>1) |
1414 | #endif>1) |
1408 | 1415 | ||
1409 | struct><1) |
1416 | struct><1) |
1410 | 1417 | ||
1411 | struct>0) |
1418 | struct>0) |
1412 | #define><0) |
1419 | #define><0) |
1413 | #define>> |
1420 | #define>> |