/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
//#include <linux/vgaarb.h>
//#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
    intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
    intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
    intel_ring_advance(LP_RING(dev_priv))

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {      \
    if (LP_RING(dev->dev_private)->obj == NULL)         \
        LOCK_TEST_WITH_RETURN(dev, file);               \
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
    if (I915_NEED_GFX_HWS(dev_priv->dev))
        return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
    else
        return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX 0x21

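/* Mirror the latest breadcrumb value from the hardware status page into
 * the master's SAREA so legacy (DRI1) userspace can observe completion. */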
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_master_private *master_priv;

    if (dev->primary->master) {
        master_priv = dev->primary->master->driver_priv;
        if (master_priv->sarea_priv)
            master_priv->sarea_priv->last_dispatch =
                READ_BREADCRUMB(dev_priv);
    }
}

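/* Program HWS_PGA with the bus address of the status page; on gen4+ the
 * shift/mask below folds bits 32-35 of the address into bits 4-7. */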
static void i915_write_hws_pga(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u32 addr;

    addr = dev_priv->status_page_dmah->busaddr;
    if (INTEL_INFO(dev)->gen >= 4)
        addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
    I915_WRITE(HWS_PGA, addr);
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring = LP_RING(dev_priv);

    if (dev_priv->status_page_dmah) {
        drm_pci_free(dev, dev_priv->status_page_dmah);
        dev_priv->status_page_dmah = NULL;
    }

    if (ring->status_page.gfx_addr) {
        ring->status_page.gfx_addr = 0;
        iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
    }

    /* Need to rewrite hardware status page */
    I915_WRITE(HWS_PGA, 0x1ffff000);
}

#if 0

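/* Resynchronize the software ring bookkeeping (head/tail/free space) with
 * the hardware ring registers after userspace has touched the ring. */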
void i915_kernel_lost_context(struct drm_device * dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_master_private *master_priv;
    struct intel_ring_buffer *ring = LP_RING(dev_priv);

    /*
     * We should never lose context on the ring with modesetting
     * as we don't expose it to userspace
     */
    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return;

    ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
    ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
    ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
    if (ring->space < 0)
        ring->space += ring->size;

    if (!dev->primary->master)
        return;

    master_priv = dev->primary->master->driver_priv;
    if (ring->head == ring->tail && master_priv->sarea_priv)
        master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

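/* Tear down legacy DMA state: uninstall the IRQ handler, clean up every
 * ring, and release the GFX hardware status page if one was set up. */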
static int i915_dma_cleanup(struct drm_device * dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int i;

    /* Make sure interrupts are disabled here because the uninstall ioctl
     * may not have been called from userspace and after dev_private
     * is freed, it's too late.
     */
    if (dev->irq_enabled)
        drm_irq_uninstall(dev);

    mutex_lock(&dev->struct_mutex);
    for (i = 0; i < I915_NUM_RINGS; i++)
        intel_cleanup_ring_buffer(&dev_priv->ring[i]);
    mutex_unlock(&dev->struct_mutex);

    /* Clear the HWS virtual address at teardown */
    if (I915_NEED_GFX_HWS(dev))
        i915_free_hws(dev);

    return 0;
}

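/* One-time DRI1 initialization: locate the SAREA, optionally build the
 * legacy render ring, and record the buffer layout userspace passed in. */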
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
    int ret;

    master_priv->sarea = drm_getsarea(dev);
    if (master_priv->sarea) {
        master_priv->sarea_priv = (drm_i915_sarea_t *)
            ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
    } else {
        DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
    }

    if (init->ring_size != 0) {
        if (LP_RING(dev_priv)->obj != NULL) {
            i915_dma_cleanup(dev);
            DRM_ERROR("Client tried to initialize ringbuffer in "
                      "GEM mode\n");
            return -EINVAL;
        }

        ret = intel_render_ring_init_dri(dev,
                                         init->ring_start,
                                         init->ring_size);
        if (ret) {
            i915_dma_cleanup(dev);
            return ret;
        }
    }

    dev_priv->dri1.cpp = init->cpp;
    dev_priv->dri1.back_offset = init->back_offset;
    dev_priv->dri1.front_offset = init->front_offset;
    dev_priv->dri1.current_page = 0;
    if (master_priv->sarea_priv)
        master_priv->sarea_priv->pf_current_page = 0;

    /* Allow hardware batchbuffers unless told otherwise.
     */
    dev_priv->dri1.allow_batchbuffer = 1;

    return 0;
}

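/* Bring legacy DMA back after suspend: the ring mapping must still be
 * present, and the hardware status page has to be reprogrammed. */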
static int i915_dma_resume(struct drm_device * dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    struct intel_ring_buffer *ring = LP_RING(dev_priv);

    DRM_DEBUG_DRIVER("%s\n", __func__);

    if (ring->virtual_start == NULL) {
        DRM_ERROR("can not ioremap virtual address for"
                  " ring buffer\n");
        return -ENOMEM;
    }

    /* Program Hardware Status Page */
    if (!ring->status_page.page_addr) {
        DRM_ERROR("Can not find hardware status page\n");
        return -EINVAL;
    }
    DRM_DEBUG_DRIVER("hw status page @ %p\n",
                     ring->status_page.page_addr);
    if (ring->status_page.gfx_addr != 0)
        intel_ring_setup_status_page(ring);
    else
        i915_write_hws_pga(dev);

    DRM_DEBUG_DRIVER("Enabled hardware status page\n");

    return 0;
}

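/* DMA init ioctl: dispatch on init->func to initialize, clean up or
 * resume the legacy DMA machinery; rejected entirely under modesetting. */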
static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
    drm_i915_init_t *init = data;
    int retcode = 0;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;

    switch (init->func) {
    case I915_INIT_DMA:
        retcode = i915_initialize(dev, init);
        break;
    case I915_CLEANUP_DMA:
        retcode = i915_dma_cleanup(dev);
        break;
    case I915_RESUME_DMA:
        retcode = i915_dma_resume(dev);
        break;
    default:
        retcode = -EINVAL;
        break;
    }

    return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
    switch (((cmd >> 29) & 0x7)) {
    case 0x0:
        switch ((cmd >> 23) & 0x3f) {
        case 0x0:
            return 1;       /* MI_NOOP */
        case 0x4:
            return 1;       /* MI_FLUSH */
        default:
            return 0;       /* disallow everything else */
        }
        break;
    case 0x1:
        return 0;       /* reserved */
    case 0x2:
        return (cmd & 0xff) + 2;        /* 2d commands */
    case 0x3:
        if (((cmd >> 24) & 0x1f) <= 0x18)
            return 1;

        switch ((cmd >> 24) & 0x1f) {
        case 0x1c:
            return 1;
        case 0x1d:
            switch ((cmd >> 16) & 0xff) {
            case 0x3:
                return (cmd & 0x1f) + 2;
            case 0x4:
                return (cmd & 0xf) + 2;
            default:
                return (cmd & 0xffff) + 2;
            }
        case 0x1e:
            if (cmd & (1 << 23))
                return (cmd & 0xffff) + 1;
            else
                return 1;
        case 0x1f:
            if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                return (cmd & 0x1ffff) + 2;
            else if (cmd & (1 << 17))       /* indirect random */
                if ((cmd & 0xffff) == 0)
                    return 0;       /* unknown length, too hard */
                else
                    return (((cmd & 0xffff) + 1) / 2) + 1;
            else
                return 2;       /* indirect sequential */
        default:
            return 0;
        }
    default:
        return 0;
    }

    return 0;
}

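/* Validate a userspace command buffer with validate_cmd() and, if every
 * instruction is allowed, copy it into the ring padded to an even number
 * of dwords. */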
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int i, ret;

    if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
        return -EINVAL;

    for (i = 0; i < dwords;) {
        int sz = validate_cmd(buffer[i]);
        if (sz == 0 || i + sz > dwords)
            return -EINVAL;
        i += sz;
    }

    ret = BEGIN_LP_RING((dwords+1)&~1);
    if (ret)
        return ret;

    for (i = 0; i < dwords; i++)
        OUT_RING(buffer[i]);
    if (dwords & 1)
        OUT_RING(0);

    ADVANCE_LP_RING();

    return 0;
}
#endif

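/* Emit a GFX_OP_DRAWRECT_INFO packet restricting subsequent rendering to
 * the given cliprect; gen4+ uses the shorter I965 variant without DR1. */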
int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *box,
              int DR1, int DR4)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

    if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
        box->y2 <= 0 || box->x2 <= 0) {
        DRM_ERROR("Bad box %d,%d..%d,%d\n",
                  box->x1, box->y1, box->x2, box->y2);
        return -EINVAL;
    }

    if (INTEL_INFO(dev)->gen >= 4) {
        ret = BEGIN_LP_RING(4);
        if (ret)
            return ret;

        OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
        OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
        OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
        OUT_RING(DR4);
    } else {
        ret = BEGIN_LP_RING(6);
        if (ret)
            return ret;

        OUT_RING(GFX_OP_DRAWRECT_INFO);
        OUT_RING(DR1);
        OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
        OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
        OUT_RING(DR4);
        OUT_RING(0);
    }
    ADVANCE_LP_RING();

    return 0;
}

#if 0
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

    dev_priv->dri1.counter++;
    if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
        dev_priv->dri1.counter = 0;
    if (master_priv->sarea_priv)
        master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

    if (BEGIN_LP_RING(4) == 0) {
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->dri1.counter);
        OUT_RING(0);
        ADVANCE_LP_RING();
    }
}

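/* Replay a command buffer once per cliprect (or once if none are given),
 * emitting the drawing rectangle before each pass. */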
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
    int nbox = cmd->num_cliprects;
    int i = 0, count, ret;

    if (cmd->sz & 0x3) {
        DRM_ERROR("alignment");
        return -EINVAL;
    }

    i915_kernel_lost_context(dev);

    count = nbox ? nbox : 1;

    for (i = 0; i < count; i++) {
        if (i < nbox) {
            ret = i915_emit_box(dev, &cliprects[i],
                                cmd->DR1, cmd->DR4);
            if (ret)
                return ret;
        }

        ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
        if (ret)
            return ret;
    }

    i915_emit_breadcrumb(dev);
    return 0;
}

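/* Start a userspace batch with MI_BATCH_BUFFER_START, or the old
 * MI_BATCH_BUFFER packet on i830/845, once per cliprect; G4x/gen5 get an
 * ISP-invalidating flush afterwards. */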
static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int nbox = batch->num_cliprects;
    int i, count, ret;

    if ((batch->start | batch->used) & 0x7) {
        DRM_ERROR("alignment");
        return -EINVAL;
    }

    i915_kernel_lost_context(dev);

    count = nbox ? nbox : 1;
    for (i = 0; i < count; i++) {
        if (i < nbox) {
            ret = i915_emit_box(dev, &cliprects[i],
                                batch->DR1, batch->DR4);
            if (ret)
                return ret;
        }

        if (!IS_I830(dev) && !IS_845G(dev)) {
            ret = BEGIN_LP_RING(2);
            if (ret)
                return ret;

            if (INTEL_INFO(dev)->gen >= 4) {
                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                OUT_RING(batch->start);
            } else {
                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
            }
        } else {
            ret = BEGIN_LP_RING(4);
            if (ret)
                return ret;

            OUT_RING(MI_BATCH_BUFFER);
            OUT_RING(batch->start | MI_BATCH_NON_SECURE);
            OUT_RING(batch->start + batch->used - 4);
            OUT_RING(0);
        }
        ADVANCE_LP_RING();
    }

    if (IS_G4X(dev) || IS_GEN5(dev)) {
        if (BEGIN_LP_RING(2) == 0) {
            OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
            OUT_RING(MI_NOOP);
            ADVANCE_LP_RING();
        }
    }

    i915_emit_breadcrumb(dev);
    return 0;
}

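/* Legacy page flip: emit an asynchronous display buffer switch between the
 * front and back offsets, wait for the plane A flip event, and publish the
 * new current page through the SAREA. */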
static int i915_dispatch_flip(struct drm_device * dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_master_private *master_priv =
        dev->primary->master->driver_priv;
    int ret;

    if (!master_priv->sarea_priv)
        return -EINVAL;

    DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                     __func__,
                     dev_priv->dri1.current_page,
                     master_priv->sarea_priv->pf_current_page);

    i915_kernel_lost_context(dev);

    ret = BEGIN_LP_RING(10);
    if (ret)
        return ret;

    OUT_RING(MI_FLUSH | MI_READ_FLUSH);
    OUT_RING(0);

    OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
    OUT_RING(0);
    if (dev_priv->dri1.current_page == 0) {
        OUT_RING(dev_priv->dri1.back_offset);
        dev_priv->dri1.current_page = 1;
    } else {
        OUT_RING(dev_priv->dri1.front_offset);
        dev_priv->dri1.current_page = 0;
    }
    OUT_RING(0);

    OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
    OUT_RING(0);

    ADVANCE_LP_RING();

    master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

    if (BEGIN_LP_RING(4) == 0) {
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->dri1.counter);
        OUT_RING(0);
        ADVANCE_LP_RING();
    }

    master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
    return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
    i915_kernel_lost_context(dev);
    return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
    int ret;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;

    RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

    mutex_lock(&dev->struct_mutex);
    ret = i915_quiescent(dev);
    mutex_unlock(&dev->struct_mutex);

    return ret;
}

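/* Batchbuffer ioctl: copy the cliprects in from userspace, dispatch the
 * batch under struct_mutex, and mirror the breadcrumb into the SAREA. */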
static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
    drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
        master_priv->sarea_priv;
    drm_i915_batchbuffer_t *batch = data;
    int ret;
    struct drm_clip_rect *cliprects = NULL;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;

    if (!dev_priv->dri1.allow_batchbuffer) {
        DRM_ERROR("Batchbuffer ioctl disabled\n");
        return -EINVAL;
    }

    DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                     batch->start, batch->used, batch->num_cliprects);

    RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

    if (batch->num_cliprects < 0)
        return -EINVAL;

    if (batch->num_cliprects) {
        cliprects = kcalloc(batch->num_cliprects,
                            sizeof(struct drm_clip_rect),
                            GFP_KERNEL);
        if (cliprects == NULL)
            return -ENOMEM;

        ret = copy_from_user(cliprects, batch->cliprects,
                             batch->num_cliprects *
                             sizeof(struct drm_clip_rect));
        if (ret != 0) {
            ret = -EFAULT;
            goto fail_free;
        }
    }

    mutex_lock(&dev->struct_mutex);
    ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
    mutex_unlock(&dev->struct_mutex);

    if (sarea_priv)
        sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
    kfree(cliprects);

    return ret;
}

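/* Cmdbuffer ioctl: like the batchbuffer path, but the commands themselves
 * are copied from userspace and validated dword by dword first. */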
static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
    drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
        master_priv->sarea_priv;
    drm_i915_cmdbuffer_t *cmdbuf = data;
    struct drm_clip_rect *cliprects = NULL;
    void *batch_data;
    int ret;

    DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                     cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;

    RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

    if (cmdbuf->num_cliprects < 0)
        return -EINVAL;

    batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
    if (batch_data == NULL)
        return -ENOMEM;

    ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
    if (ret != 0) {
        ret = -EFAULT;
        goto fail_batch_free;
    }

    if (cmdbuf->num_cliprects) {
        cliprects = kcalloc(cmdbuf->num_cliprects,
                            sizeof(struct drm_clip_rect), GFP_KERNEL);
        if (cliprects == NULL) {
            ret = -ENOMEM;
            goto fail_batch_free;
        }

        ret = copy_from_user(cliprects, cmdbuf->cliprects,
                             cmdbuf->num_cliprects *
                             sizeof(struct drm_clip_rect));
        if (ret != 0) {
            ret = -EFAULT;
            goto fail_clip_free;
        }
    }

    mutex_lock(&dev->struct_mutex);
    ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
    mutex_unlock(&dev->struct_mutex);
    if (ret) {
        DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
        goto fail_clip_free;
    }

    if (sarea_priv)
        sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
    kfree(cliprects);
fail_batch_free:
    kfree(batch_data);

    return ret;
}

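/* Bump the breadcrumb counter and emit a store plus MI_USER_INTERRUPT;
 * the returned sequence number is what i915_wait_irq() waits on. */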
static int i915_emit_irq(struct drm_device * dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

    i915_kernel_lost_context(dev);

    DRM_DEBUG_DRIVER("\n");

    dev_priv->dri1.counter++;
    if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
        dev_priv->dri1.counter = 1;
    if (master_priv->sarea_priv)
        master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

    if (BEGIN_LP_RING(4) == 0) {
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->dri1.counter);
        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();
    }

    return dev_priv->dri1.counter;
}

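/* Wait up to three seconds for the status-page breadcrumb to reach irq_nr,
 * preferring the ring interrupt and falling back to polling. */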
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
    int ret = 0;
    struct intel_ring_buffer *ring = LP_RING(dev_priv);

    DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                     READ_BREADCRUMB(dev_priv));

    if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
        if (master_priv->sarea_priv)
            master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
        return 0;
    }

    if (master_priv->sarea_priv)
        master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

    if (ring->irq_get(ring)) {
        DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);
        ring->irq_put(ring);
    } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
        ret = -EBUSY;

    if (ret == -EBUSY) {
        DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
    }

    return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    drm_i915_irq_emit_t *emit = data;
    int result;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;

    if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
        DRM_ERROR("called with no initialization\n");
        return -EINVAL;
    }

    RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

    mutex_lock(&dev->struct_mutex);
    result = i915_emit_irq(dev);
    mutex_unlock(&dev->struct_mutex);

    if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
        DRM_ERROR("copy_to_user\n");
        return -EFAULT;
    }

    return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    drm_i915_irq_wait_t *irqwait = data;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;

    if (!dev_priv) {
        DRM_ERROR("called with no initialization\n");
        return -EINVAL;
    }

    return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    drm_i915_vblank_pipe_t *pipe = data;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;

    if (!dev_priv) {
        DRM_ERROR("called with no initialization\n");
        return -EINVAL;
    }

    pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

    return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
    /* The delayed swap mechanism was fundamentally racy, and has been
     * removed.  The model was that the client requested a delayed flip/swap
     * from the kernel, then waited for vblank before continuing to perform
     * rendering.  The problem was that the kernel might wake the client
     * up before it dispatched the vblank swap (since the lock has to be
     * held while touching the ringbuffer), in which case the client would
     * clear and start the next frame before the swap occurred, and
     * flicker would occur in addition to likely missing the vblank.
     *
     * In the absence of this ioctl, userland falls back to a correct path
     * of waiting for a vblank, then dispatching the swap on its own.
     * Context switching to userland and back is plenty fast enough for
     * meeting the requirements of vblank swapping.
     */
    return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
    int ret;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;

    DRM_DEBUG_DRIVER("%s\n", __func__);

    RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

    mutex_lock(&dev->struct_mutex);
    ret = i915_dispatch_flip(dev);
    mutex_unlock(&dev->struct_mutex);

    return ret;
}
#endif

916 | static int i915_getparam(struct drm_device *dev, void *data, |
918 | int i915_getparam(struct drm_device *dev, void *data, |
917 | struct drm_file *file_priv) |
919 | struct drm_file *file_priv) |
918 | { |
920 | { |
919 | drm_i915_private_t *dev_priv = dev->dev_private; |
921 | drm_i915_private_t *dev_priv = dev->dev_private; |
920 | drm_i915_getparam_t *param = data; |
922 | drm_i915_getparam_t *param = data; |
921 | int value; |
923 | int value; |
922 | 924 | ||
923 | if (!dev_priv) { |
925 | if (!dev_priv) { |
924 | DRM_ERROR("called with no initialization\n"); |
926 | DRM_ERROR("called with no initialization\n"); |
925 | return -EINVAL; |
927 | return -EINVAL; |
926 | } |
928 | } |
927 | 929 | ||
928 | switch (param->param) { |
930 | switch (param->param) { |
929 | case I915_PARAM_IRQ_ACTIVE: |
931 | case I915_PARAM_IRQ_ACTIVE: |
930 | value = dev->pdev->irq ? 1 : 0; |
932 | value = dev->pdev->irq ? 1 : 0; |
931 | break; |
933 | break; |
932 | case I915_PARAM_ALLOW_BATCHBUFFER: |
934 | case I915_PARAM_ALLOW_BATCHBUFFER: |
933 | value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; |
935 | value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; |
934 | break; |
936 | break; |
935 | case I915_PARAM_LAST_DISPATCH: |
937 | case I915_PARAM_LAST_DISPATCH: |
936 | value = READ_BREADCRUMB(dev_priv); |
938 | value = READ_BREADCRUMB(dev_priv); |
937 | break; |
939 | break; |
938 | case I915_PARAM_CHIPSET_ID: |
940 | case I915_PARAM_CHIPSET_ID: |
939 | value = dev->pci_device; |
941 | value = dev->pci_device; |
940 | break; |
942 | break; |
941 | case I915_PARAM_HAS_GEM: |
943 | case I915_PARAM_HAS_GEM: |
942 | value = 1; |
944 | value = 1; |
943 | break; |
945 | break; |
944 | case I915_PARAM_NUM_FENCES_AVAIL: |
946 | case I915_PARAM_NUM_FENCES_AVAIL: |
945 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; |
947 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; |
946 | break; |
948 | break; |
947 | case I915_PARAM_HAS_OVERLAY: |
949 | case I915_PARAM_HAS_OVERLAY: |
948 | value = dev_priv->overlay ? 1 : 0; |
950 | value = dev_priv->overlay ? 1 : 0; |
949 | break; |
951 | break; |
950 | case I915_PARAM_HAS_PAGEFLIPPING: |
952 | case I915_PARAM_HAS_PAGEFLIPPING: |
951 | value = 1; |
953 | value = 1; |
952 | break; |
954 | break; |
953 | case I915_PARAM_HAS_EXECBUF2: |
955 | case I915_PARAM_HAS_EXECBUF2: |
954 | /* depends on GEM */ |
956 | /* depends on GEM */ |
955 | value = 1; |
957 | value = 1; |
956 | break; |
958 | break; |
957 | case I915_PARAM_HAS_BSD: |
959 | case I915_PARAM_HAS_BSD: |
958 | value = intel_ring_initialized(&dev_priv->ring[VCS]); |
960 | value = intel_ring_initialized(&dev_priv->ring[VCS]); |
959 | break; |
961 | break; |
960 | case I915_PARAM_HAS_BLT: |
962 | case I915_PARAM_HAS_BLT: |
961 | value = intel_ring_initialized(&dev_priv->ring[BCS]); |
963 | value = intel_ring_initialized(&dev_priv->ring[BCS]); |
962 | break; |
964 | break; |
- | 965 | case I915_PARAM_HAS_VEBOX: |
|
- | 966 | value = intel_ring_initialized(&dev_priv->ring[VECS]); |
|
- | 967 | break; |
|
963 | case I915_PARAM_HAS_RELAXED_FENCING: |
968 | case I915_PARAM_HAS_RELAXED_FENCING: |
964 | value = 1; |
969 | value = 1; |
965 | break; |
970 | break; |
966 | case I915_PARAM_HAS_COHERENT_RINGS: |
971 | case I915_PARAM_HAS_COHERENT_RINGS: |
967 | value = 1; |
972 | value = 1; |
968 | break; |
973 | break; |
969 | case I915_PARAM_HAS_EXEC_CONSTANTS: |
974 | case I915_PARAM_HAS_EXEC_CONSTANTS: |
970 | value = INTEL_INFO(dev)->gen >= 4; |
975 | value = INTEL_INFO(dev)->gen >= 4; |
971 | break; |
976 | break; |
972 | case I915_PARAM_HAS_RELAXED_DELTA: |
977 | case I915_PARAM_HAS_RELAXED_DELTA: |
973 | value = 1; |
978 | value = 1; |
974 | break; |
979 | break; |
975 | case I915_PARAM_HAS_GEN7_SOL_RESET: |
980 | case I915_PARAM_HAS_GEN7_SOL_RESET: |
976 | value = 1; |
981 | value = 1; |
977 | break; |
982 | break; |
978 | case I915_PARAM_HAS_LLC: |
983 | case I915_PARAM_HAS_LLC: |
979 | value = HAS_LLC(dev); |
984 | value = HAS_LLC(dev); |
980 | break; |
985 | break; |
- | 986 | case I915_PARAM_HAS_WT: |
|
- | 987 | value = HAS_WT(dev); |
|
- | 988 | break; |
|
981 | case I915_PARAM_HAS_ALIASING_PPGTT: |
989 | case I915_PARAM_HAS_ALIASING_PPGTT: |
982 | value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; |
990 | value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; |
983 | break; |
991 | break; |
984 | case I915_PARAM_HAS_WAIT_TIMEOUT: |
992 | case I915_PARAM_HAS_WAIT_TIMEOUT: |
985 | value = 1; |
993 | value = 1; |
986 | break; |
994 | break; |
987 | case I915_PARAM_HAS_SEMAPHORES: |
995 | case I915_PARAM_HAS_SEMAPHORES: |
988 | value = i915_semaphore_is_enabled(dev); |
996 | value = i915_semaphore_is_enabled(dev); |
989 | break; |
997 | break; |
990 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: |
998 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: |
991 | value = 1; |
999 | value = 1; |
992 | break; |
1000 | break; |
993 | case I915_PARAM_HAS_SECURE_BATCHES: |
1001 | case I915_PARAM_HAS_SECURE_BATCHES: |
994 | value = 1; |
1002 | value = 1; |
995 | break; |
1003 | break; |
996 | case I915_PARAM_HAS_PINNED_BATCHES: |
1004 | case I915_PARAM_HAS_PINNED_BATCHES: |
997 | value = 1; |
1005 | value = 1; |
998 | break; |
1006 | break; |
999 | case I915_PARAM_HAS_EXEC_NO_RELOC: |
1007 | case I915_PARAM_HAS_EXEC_NO_RELOC: |
1000 | value = 1; |
1008 | value = 1; |
1001 | break; |
1009 | break; |
1002 | case I915_PARAM_HAS_EXEC_HANDLE_LUT: |
1010 | case I915_PARAM_HAS_EXEC_HANDLE_LUT: |
1003 | value = 1; |
1011 | value = 0; //1; |
1004 | break; |
1012 | break; |
1005 | default: |
1013 | default: |
1006 | DRM_DEBUG("Unknown parameter %d\n", param->param); |
1014 | DRM_DEBUG("Unknown parameter %d\n", param->param); |
1007 | return -EINVAL; |
1015 | return -EINVAL; |
1008 | } |
1016 | } |
1009 | 1017 | ||
1010 | // if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { |
1018 | // if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { |
1011 | // DRM_ERROR("DRM_COPY_TO_USER failed\n"); |
1019 | // DRM_ERROR("DRM_COPY_TO_USER failed\n"); |
1012 | // return -EFAULT; |
1020 | // return -EFAULT; |
1013 | // } |
1021 | // } |
1014 | 1022 | ||
1015 | *param->value = value; |
1023 | *param->value = value; |
1016 | 1024 | ||
1017 | return 0; |
1025 | return 0; |
1018 | } |
1026 | } |
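
/* A userspace caller exercises this roughly as follows (illustrative sketch
 * against the published i915 uapi; 'fd' is an assumed open DRM device node):
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_BLT, .value = &value };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value)
 *		; // the blitter (BLT) ring is available
 */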

#if 0
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}
#endif

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Set up MCHBAR if possible; flag dev_priv->mchbar_need_disable when it
 * should be disabled again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	dbgprintf("Epic fail\n");

#if 0
	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
#endif
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
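
/* In the upstream driver this callback is handed to the VGA arbiter during
 * modeset init, roughly as below (sketch; this port keeps the corresponding
 * calls commented out in its error/cleanup paths):
 *
 *	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
 *	if (ret && ret != -ENODEV)
 *		goto out;
 */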

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config(dev);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_irq:
//	drm_irq_uninstall(dev);
cleanup_gem_stolen:
//	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
//	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
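
/* Illustrative expansion (assuming, say, DEV_INFO_FOR_EACH_FLAG(f, sep)
 * were defined as f(is_mobile) sep f(has_llc)): the format string above
 * becomes "%s" "%s" and the trailing arguments become
 *	info->is_mobile ? "is_mobile," : "", info->has_llc ? "has_llc," : ""
 * so only the flags that are actually set get printed, comma-separated.
 */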

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	spin_lock_init(&dev_priv->backlight.lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->rps.hw_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	mutex_init(&dev_priv->pc8.lock);
	dev_priv->pc8.requirements_met = false;
	dev_priv->pc8.gpu_idle = false;
	dev_priv->pc8.irqs_disabled = false;
	dev_priv->pc8.enabled = false;
	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	intel_uncore_early_sanitize(dev);

	if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto put_bridge;

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */

	aperture_size = dev_priv->gtt.mappable_end;

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}
	system_wq = dev_priv->wq;
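
	/* Work queued here therefore runs strictly one item at a time; for
	 * illustration, the periodic retire handler is kicked onto this queue
	 * elsewhere in the GEM code roughly like so (sketch, names as in
	 * upstream i915_gem.c):
	 *
	 *	queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
	 *			   round_jiffies_up_relative(HZ));
	 */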

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_irq_init(dev);
	intel_pm_init(dev);
	intel_uncore_sanitize(dev);
	intel_uncore_init(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */

	dev_priv->num_plane = 1;
	if (IS_VALLEYVIEW(dev))
		dev_priv->num_plane = 2;

	if (HAS_POWER_WELL(dev))
		i915_init_power_well(dev);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_gem_unload;
	}

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
//		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	main_device = dev;

	return 0;

out_gem_unload:
//	if (dev_priv->mm.inactive_shrinker.shrink)
//		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

//	if (dev->pdev->msi_enabled)
//		pci_disable_msi(dev->pdev);

//	intel_teardown_gmbus(dev);
//	intel_teardown_mchbar(dev);
//	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
//	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
//	io_mapping_free(dev_priv->gtt.mappable);
//	dev_priv->gtt.gtt_remove(dev);
out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
//	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}

#if 0

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_gpu_ips_teardown();

	if (HAS_POWER_WELL(dev)) {
		/* The i915.ko module is still not prepared to be loaded when
		 * the power well is not enabled, so just enable it in case
		 * we're going to unload/reload. */
		intel_set_power_well(dev, true);
		i915_remove_power_well(dev);
	}

	i915_teardown_sysfs(dev);

	if (dev_priv->mm.inactive_shrinker.scan_objects)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		cancel_work_sync(&dev_priv->console_resume_work);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	list_del(&dev_priv->gtt.base.global_link);
	WARN_ON(!list_empty(&dev_priv->vm_list));
	drm_mm_takedown(&dev_priv->gtt.base.mm);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}
#endif

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	idr_init(&file_priv->context_idr);

	return 0;
}

#if 0
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp, for
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}
#endif

int gem_getparam(struct drm_device *dev, void *data)
{
	return i915_getparam(dev, data, NULL);
}
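
/* Illustrative in-kernel caller for this port (hypothetical helper usage;
 * 'dev' is the DRM device set up by i915_driver_load()):
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_GEM, .value = &value };
 *	gem_getparam(dev, &gp);
 */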