/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include
#include
#include
#include "intel_drv.h"
#include
#include "i915_drv.h"
#include "i915_trace.h"
#include
//#include
//#include
//#include
//#include
#include
//#include

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {		\
	if (LP_RING(dev->dev_private)->obj == NULL)		\
		LOCK_TEST_WITH_RETURN(dev, file);		\
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}

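/* Point HWS_PGA at the DMA-coherent status page allocation; on gen4+ the
 * high bits (35:32) of the bus address are folded into bits 7:4 of the
 * register value. */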
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

#if 0

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

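/* Legacy DRI1 initialization: locate the SAREA, optionally set up the render
 * ring from the userspace-supplied start/size, and record the front/back
 * buffer layout used by the old batchbuffer paths. */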
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

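/* Re-validate the legacy ring mapping and hardware status page after a
 * suspend/resume cycle; fails if either the ring mapping or the status
 * page has gone away. */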
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

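/* DRM_IOCTL_I915_INIT entry point: dispatches the legacy init, cleanup and
 * resume sub-functions above; rejected outright when kernel modesetting is
 * in use. */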
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

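/* Walk a user command buffer with validate_cmd(); if every instruction is
 * allowed and stays within bounds, copy it dword by dword into the legacy
 * render ring, padding to an even dword count. */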
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
#endif

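/* Emit a GFX_OP_DRAWRECT_INFO packet restricting rendering to the given
 * clip rectangle; gen4+ uses the shorter GFX_OP_DRAWRECT_INFO_I965
 * variant (4 dwords instead of 6). */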
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

#if 0
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

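/* Legacy cmdbuffer dispatch: replay the validated command stream once per
 * cliprect (or once if there are none), emitting a draw rectangle before
 * each pass, then stamp a breadcrumb so userspace can track completion. */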
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

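/* Legacy batchbuffer dispatch: start the user batch once per cliprect via
 * MI_BATCH_BUFFER_START (or MI_BATCH_BUFFER on 830/845G), flush the ISP on
 * G4x/gen5, and finish with a breadcrumb write. */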
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}


	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

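/* Legacy page flip for the DRI1 swap path: flush, emit an asynchronous
 * CMD_OP_DISPLAYBUFFER_INFO pointing at the other buffer, wait for the
 * plane A flip to complete, and record a breadcrumb in the SAREA. */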
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

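/* DRM_IOCTL_I915_BATCHBUFFER: copy the user cliprects in, dispatch the
 * batch on the legacy ring, and mirror the latest breadcrumb back into the
 * SAREA for the DRI1 client. */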
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

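/* DRM_IOCTL_I915_CMDBUFFER: copy both the command stream and the cliprects
 * from userspace, run them through the validating dispatch path, and report
 * the resulting breadcrumb via the SAREA. */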
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

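/* Bump the legacy breadcrumb counter and queue MI_STORE_DWORD_INDEX plus
 * MI_USER_INTERRUPT, so an interrupt fires once the ring reaches this
 * point; returns the sequence number that was emitted. */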
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}

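/* Block (for up to three seconds) until the breadcrumb in the hardware
 * status page reaches irq_nr, using the ring's user interrupt when it can
 * be enabled and falling back to polling otherwise. */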
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
#endif

918 | int i915_getparam(struct drm_device *dev, void *data, |
918 | int i915_getparam(struct drm_device *dev, void *data, |
919 | struct drm_file *file_priv) |
919 | struct drm_file *file_priv) |
920 | { |
920 | { |
921 | drm_i915_private_t *dev_priv = dev->dev_private; |
921 | drm_i915_private_t *dev_priv = dev->dev_private; |
922 | drm_i915_getparam_t *param = data; |
922 | drm_i915_getparam_t *param = data; |
923 | int value; |
923 | int value; |
924 | 924 | ||
925 | if (!dev_priv) { |
925 | if (!dev_priv) { |
926 | DRM_ERROR("called with no initialization\n"); |
926 | DRM_ERROR("called with no initialization\n"); |
927 | return -EINVAL; |
927 | return -EINVAL; |
928 | } |
928 | } |
929 | 929 | ||
930 | switch (param->param) { |
930 | switch (param->param) { |
931 | case I915_PARAM_IRQ_ACTIVE: |
931 | case I915_PARAM_IRQ_ACTIVE: |
932 | value = dev->pdev->irq ? 1 : 0; |
932 | value = dev->pdev->irq ? 1 : 0; |
933 | break; |
933 | break; |
934 | case I915_PARAM_ALLOW_BATCHBUFFER: |
934 | case I915_PARAM_ALLOW_BATCHBUFFER: |
935 | value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; |
935 | value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; |
936 | break; |
936 | break; |
937 | case I915_PARAM_LAST_DISPATCH: |
937 | case I915_PARAM_LAST_DISPATCH: |
938 | value = READ_BREADCRUMB(dev_priv); |
938 | value = READ_BREADCRUMB(dev_priv); |
939 | break; |
939 | break; |
940 | case I915_PARAM_CHIPSET_ID: |
940 | case I915_PARAM_CHIPSET_ID: |
941 | value = dev->pci_device; |
941 | value = dev->pci_device; |
942 | break; |
942 | break; |
943 | case I915_PARAM_HAS_GEM: |
943 | case I915_PARAM_HAS_GEM: |
944 | value = 1; |
944 | value = 1; |
945 | break; |
945 | break; |
946 | case I915_PARAM_NUM_FENCES_AVAIL: |
946 | case I915_PARAM_NUM_FENCES_AVAIL: |
947 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; |
947 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; |
948 | break; |
948 | break; |
949 | case I915_PARAM_HAS_OVERLAY: |
949 | case I915_PARAM_HAS_OVERLAY: |
950 | value = dev_priv->overlay ? 1 : 0; |
950 | value = dev_priv->overlay ? 1 : 0; |
951 | break; |
951 | break; |
952 | case I915_PARAM_HAS_PAGEFLIPPING: |
952 | case I915_PARAM_HAS_PAGEFLIPPING: |
953 | value = 1; |
953 | value = 1; |
954 | break; |
954 | break; |
955 | case I915_PARAM_HAS_EXECBUF2: |
955 | case I915_PARAM_HAS_EXECBUF2: |
956 | /* depends on GEM */ |
956 | /* depends on GEM */ |
957 | value = 1; |
957 | value = 1; |
958 | break; |
958 | break; |
959 | case I915_PARAM_HAS_BSD: |
959 | case I915_PARAM_HAS_BSD: |
960 | value = intel_ring_initialized(&dev_priv->ring[VCS]); |
960 | value = intel_ring_initialized(&dev_priv->ring[VCS]); |
961 | break; |
961 | break; |
962 | case I915_PARAM_HAS_BLT: |
962 | case I915_PARAM_HAS_BLT: |
963 | value = intel_ring_initialized(&dev_priv->ring[BCS]); |
963 | value = intel_ring_initialized(&dev_priv->ring[BCS]); |
964 | break; |
964 | break; |
965 | case I915_PARAM_HAS_VEBOX: |
965 | case I915_PARAM_HAS_VEBOX: |
966 | value = intel_ring_initialized(&dev_priv->ring[VECS]); |
966 | value = intel_ring_initialized(&dev_priv->ring[VECS]); |
967 | break; |
967 | break; |
968 | case I915_PARAM_HAS_RELAXED_FENCING: |
968 | case I915_PARAM_HAS_RELAXED_FENCING: |
969 | value = 1; |
969 | value = 1; |
970 | break; |
970 | break; |
971 | case I915_PARAM_HAS_COHERENT_RINGS: |
971 | case I915_PARAM_HAS_COHERENT_RINGS: |
972 | value = 1; |
972 | value = 1; |
973 | break; |
973 | break; |
974 | case I915_PARAM_HAS_EXEC_CONSTANTS: |
974 | case I915_PARAM_HAS_EXEC_CONSTANTS: |
975 | value = INTEL_INFO(dev)->gen >= 4; |
975 | value = INTEL_INFO(dev)->gen >= 4; |
976 | break; |
976 | break; |
977 | case I915_PARAM_HAS_RELAXED_DELTA: |
977 | case I915_PARAM_HAS_RELAXED_DELTA: |
978 | value = 1; |
978 | value = 1; |
979 | break; |
979 | break; |
980 | case I915_PARAM_HAS_GEN7_SOL_RESET: |
980 | case I915_PARAM_HAS_GEN7_SOL_RESET: |
981 | value = 1; |
981 | value = 1; |
982 | break; |
982 | break; |
983 | case I915_PARAM_HAS_LLC: |
983 | case I915_PARAM_HAS_LLC: |
984 | value = HAS_LLC(dev); |
984 | value = HAS_LLC(dev); |
985 | break; |
985 | break; |
986 | case I915_PARAM_HAS_WT: |
986 | case I915_PARAM_HAS_WT: |
987 | value = HAS_WT(dev); |
987 | value = HAS_WT(dev); |
988 | break; |
988 | break; |
989 | case I915_PARAM_HAS_ALIASING_PPGTT: |
989 | case I915_PARAM_HAS_ALIASING_PPGTT: |
990 | value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; |
990 | value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; |
991 | break; |
991 | break; |
992 | case I915_PARAM_HAS_WAIT_TIMEOUT: |
992 | case I915_PARAM_HAS_WAIT_TIMEOUT: |
993 | value = 1; |
993 | value = 1; |
994 | break; |
994 | break; |
995 | case I915_PARAM_HAS_SEMAPHORES: |
995 | case I915_PARAM_HAS_SEMAPHORES: |
996 | value = i915_semaphore_is_enabled(dev); |
996 | value = i915_semaphore_is_enabled(dev); |
997 | break; |
997 | break; |
998 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: |
998 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: |
999 | value = 1; |
999 | value = 1; |
1000 | break; |
1000 | break; |
1001 | case I915_PARAM_HAS_SECURE_BATCHES: |
1001 | case I915_PARAM_HAS_SECURE_BATCHES: |
1002 | value = 1; |
1002 | value = 1; |
1003 | break; |
1003 | break; |
1004 | case I915_PARAM_HAS_PINNED_BATCHES: |
1004 | case I915_PARAM_HAS_PINNED_BATCHES: |
1005 | value = 1; |
1005 | value = 1; |
1006 | break; |
1006 | break; |
1007 | case I915_PARAM_HAS_EXEC_NO_RELOC: |
1007 | case I915_PARAM_HAS_EXEC_NO_RELOC: |
1008 | value = 1; |
1008 | value = 1; |
1009 | break; |
1009 | break; |
1010 | case I915_PARAM_HAS_EXEC_HANDLE_LUT: |
1010 | case I915_PARAM_HAS_EXEC_HANDLE_LUT: |
1011 | value = 1; |
1011 | value = 1; |
1012 | break; |
1012 | break; |
1013 | default: |
1013 | default: |
1014 | DRM_DEBUG("Unknown parameter %d\n", param->param); |
1014 | DRM_DEBUG("Unknown parameter %d\n", param->param); |
1015 | return -EINVAL; |
1015 | return -EINVAL; |
1016 | } |
1016 | } |
1017 | 1017 | ||
1018 | *param->value = value; |
1018 | *param->value = value; |
1019 | 1019 | ||
1020 | return 0; |
1020 | return 0; |
1021 | } |
1021 | } |
1022 | 1022 | ||
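Seen from the other end of this ioctl, userspace passes a parameter id plus a pointer, and the handler above writes the result back through *param->value. A rough userspace sketch of such a query; the helper name and fd are illustrative, and the struct and ioctl macro come from libdrm's i915_drm.h, whose include path may differ per system.

#include <xf86drm.h>
#include <i915_drm.h>

/* Ask whether GEM is available.  gp.value points at the int that the
 * kernel handler fills in via "*param->value = value". */
static int i915_has_gem(int drm_fd)
{
        int value = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_HAS_GEM,
                .value = &value,
        };

        if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return 0;       /* query failed or very old kernel */
        return value;
}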
1023 | #if 0 |
1023 | #if 0 |
1024 | static int i915_setparam(struct drm_device *dev, void *data, |
1024 | static int i915_setparam(struct drm_device *dev, void *data, |
1025 | struct drm_file *file_priv) |
1025 | struct drm_file *file_priv) |
1026 | { |
1026 | { |
1027 | drm_i915_private_t *dev_priv = dev->dev_private; |
1027 | drm_i915_private_t *dev_priv = dev->dev_private; |
1028 | drm_i915_setparam_t *param = data; |
1028 | drm_i915_setparam_t *param = data; |
1029 | 1029 | ||
1030 | if (!dev_priv) { |
1030 | if (!dev_priv) { |
1031 | DRM_ERROR("called with no initialization\n"); |
1031 | DRM_ERROR("called with no initialization\n"); |
1032 | return -EINVAL; |
1032 | return -EINVAL; |
1033 | } |
1033 | } |
1034 | 1034 | ||
1035 | switch (param->param) { |
1035 | switch (param->param) { |
1036 | case I915_SETPARAM_USE_MI_BATCHBUFFER_START: |
1036 | case I915_SETPARAM_USE_MI_BATCHBUFFER_START: |
1037 | break; |
1037 | break; |
1038 | case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: |
1038 | case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: |
1039 | break; |
1039 | break; |
1040 | case I915_SETPARAM_ALLOW_BATCHBUFFER: |
1040 | case I915_SETPARAM_ALLOW_BATCHBUFFER: |
1041 | dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0; |
1041 | dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0; |
1042 | break; |
1042 | break; |
1043 | case I915_SETPARAM_NUM_USED_FENCES: |
1043 | case I915_SETPARAM_NUM_USED_FENCES: |
1044 | if (param->value > dev_priv->num_fence_regs || |
1044 | if (param->value > dev_priv->num_fence_regs || |
1045 | param->value < 0) |
1045 | param->value < 0) |
1046 | return -EINVAL; |
1046 | return -EINVAL; |
1047 | /* Userspace can use first N regs */ |
1047 | /* Userspace can use first N regs */ |
1048 | dev_priv->fence_reg_start = param->value; |
1048 | dev_priv->fence_reg_start = param->value; |
1049 | break; |
1049 | break; |
1050 | default: |
1050 | default: |
1051 | DRM_DEBUG_DRIVER("unknown parameter %d\n", |
1051 | DRM_DEBUG_DRIVER("unknown parameter %d\n", |
1052 | param->param); |
1052 | param->param); |
1053 | return -EINVAL; |
1053 | return -EINVAL; |
1054 | } |
1054 | } |
1055 | 1055 | ||
1056 | return 0; |
1056 | return 0; |
1057 | } |
1057 | } |
1058 | #endif |
1058 | #endif |
1059 | 1059 | ||
1060 | 1060 | ||
1061 | 1061 | ||
1062 | static int i915_get_bridge_dev(struct drm_device *dev) |
1062 | static int i915_get_bridge_dev(struct drm_device *dev) |
1063 | { |
1063 | { |
1064 | struct drm_i915_private *dev_priv = dev->dev_private; |
1064 | struct drm_i915_private *dev_priv = dev->dev_private; |
1065 | 1065 | ||
1066 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); |
1066 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); |
1067 | if (!dev_priv->bridge_dev) { |
1067 | if (!dev_priv->bridge_dev) { |
1068 | DRM_ERROR("bridge device not found\n"); |
1068 | DRM_ERROR("bridge device not found\n"); |
1069 | return -1; |
1069 | return -1; |
1070 | } |
1070 | } |
1071 | return 0; |
1071 | return 0; |
1072 | } |
1072 | } |
1073 | 1073 | ||
1074 | #define MCHBAR_I915 0x44 |
1074 | #define MCHBAR_I915 0x44 |
1075 | #define MCHBAR_I965 0x48 |
1075 | #define MCHBAR_I965 0x48 |
1076 | #define MCHBAR_SIZE (4*4096) |
1076 | #define MCHBAR_SIZE (4*4096) |
1077 | 1077 | ||
1078 | #define DEVEN_REG 0x54 |
1078 | #define DEVEN_REG 0x54 |
1079 | #define DEVEN_MCHBAR_EN (1 << 28) |
1079 | #define DEVEN_MCHBAR_EN (1 << 28) |
1080 | 1080 | ||
1081 | 1081 | ||
1082 | 1082 | ||
1083 | 1083 | ||
1084 | /* Setup MCHBAR if possible; set mchbar_need_disable if we have to disable it again on teardown */ |
1084 | /* Setup MCHBAR if possible; set mchbar_need_disable if we have to disable it again on teardown */ |
1085 | static void |
1085 | static void |
1086 | intel_setup_mchbar(struct drm_device *dev) |
1086 | intel_setup_mchbar(struct drm_device *dev) |
1087 | { |
1087 | { |
1088 | drm_i915_private_t *dev_priv = dev->dev_private; |
1088 | drm_i915_private_t *dev_priv = dev->dev_private; |
1089 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
1089 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
1090 | u32 temp; |
1090 | u32 temp; |
1091 | bool enabled; |
1091 | bool enabled; |
1092 | 1092 | ||
1093 | dev_priv->mchbar_need_disable = false; |
1093 | dev_priv->mchbar_need_disable = false; |
1094 | 1094 | ||
1095 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
1095 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
1096 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); |
1096 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); |
1097 | enabled = !!(temp & DEVEN_MCHBAR_EN); |
1097 | enabled = !!(temp & DEVEN_MCHBAR_EN); |
1098 | } else { |
1098 | } else { |
1099 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); |
1099 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); |
1100 | enabled = temp & 1; |
1100 | enabled = temp & 1; |
1101 | } |
1101 | } |
1102 | 1102 | ||
1103 | /* If it's already enabled, don't have to do anything */ |
1103 | /* If it's already enabled, don't have to do anything */ |
1104 | if (enabled) |
1104 | if (enabled) |
1105 | return; |
1105 | return; |
1106 | 1106 | ||
1107 | dbgprintf("Epic fail\n"); |
1107 | dbgprintf("Epic fail\n"); |
1108 | 1108 | ||
1109 | #if 0 |
1109 | #if 0 |
1110 | if (intel_alloc_mchbar_resource(dev)) |
1110 | if (intel_alloc_mchbar_resource(dev)) |
1111 | return; |
1111 | return; |
1112 | 1112 | ||
1113 | dev_priv->mchbar_need_disable = true; |
1113 | dev_priv->mchbar_need_disable = true; |
1114 | 1114 | ||
1115 | /* Space is allocated or reserved, so enable it. */ |
1115 | /* Space is allocated or reserved, so enable it. */ |
1116 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
1116 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
1117 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, |
1117 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, |
1118 | temp | DEVEN_MCHBAR_EN); |
1118 | temp | DEVEN_MCHBAR_EN); |
1119 | } else { |
1119 | } else { |
1120 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); |
1120 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); |
1121 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); |
1121 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); |
1122 | } |
1122 | } |
1123 | #endif |
1123 | #endif |
1124 | } |
1124 | } |
1125 | 1125 | ||
1126 | 1126 | ||
1127 | /* true = enable decode, false = disable decode */ |
1127 | /* true = enable decode, false = disable decode */ |
1128 | static unsigned int i915_vga_set_decode(void *cookie, bool state) |
1128 | static unsigned int i915_vga_set_decode(void *cookie, bool state) |
1129 | { |
1129 | { |
1130 | struct drm_device *dev = cookie; |
1130 | struct drm_device *dev = cookie; |
1131 | 1131 | ||
1132 | intel_modeset_vga_set_state(dev, state); |
1132 | intel_modeset_vga_set_state(dev, state); |
1133 | if (state) |
1133 | if (state) |
1134 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
1134 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
1135 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
1135 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
1136 | else |
1136 | else |
1137 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
1137 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
1138 | } |
1138 | } |
1139 | 1139 | ||
1140 | 1140 | ||
1141 | 1141 | ||
1142 | 1142 | ||
1143 | 1143 | ||
1144 | 1144 | ||
1145 | static int i915_load_modeset_init(struct drm_device *dev) |
1145 | static int i915_load_modeset_init(struct drm_device *dev) |
1146 | { |
1146 | { |
1147 | struct drm_i915_private *dev_priv = dev->dev_private; |
1147 | struct drm_i915_private *dev_priv = dev->dev_private; |
1148 | int ret; |
1148 | int ret; |
1149 | 1149 | ||
1150 | ret = intel_parse_bios(dev); |
1150 | ret = intel_parse_bios(dev); |
1151 | if (ret) |
1151 | if (ret) |
1152 | DRM_INFO("failed to find VBIOS tables\n"); |
1152 | DRM_INFO("failed to find VBIOS tables\n"); |
1153 | 1153 | ||
1154 | fb_obj = kos_gem_fb_object_create(dev,0,12*1024*1024); |
1154 | fb_obj = kos_gem_fb_object_create(dev,0,12*1024*1024); |
1155 | 1155 | ||
1156 | /* Initialise stolen first so that we may reserve preallocated |
1156 | /* Initialise stolen first so that we may reserve preallocated |
1157 | * objects for the BIOS to KMS transition. |
1157 | * objects for the BIOS to KMS transition. |
1158 | */ |
1158 | */ |
1159 | ret = i915_gem_init_stolen(dev); |
1159 | ret = i915_gem_init_stolen(dev); |
1160 | if (ret) |
1160 | if (ret) |
1161 | goto cleanup_vga_switcheroo; |
1161 | goto cleanup_vga_switcheroo; |
1162 | 1162 | ||
1163 | ret = drm_irq_install(dev); |
1163 | ret = drm_irq_install(dev); |
1164 | if (ret) |
1164 | if (ret) |
1165 | goto cleanup_gem_stolen; |
1165 | goto cleanup_gem_stolen; |
1166 | 1166 | ||
1167 | /* Important: The output setup functions called by modeset_init need |
1167 | /* Important: The output setup functions called by modeset_init need |
1168 | * working irqs for e.g. gmbus and dp aux transfers. */ |
1168 | * working irqs for e.g. gmbus and dp aux transfers. */ |
1169 | intel_modeset_init(dev); |
1169 | intel_modeset_init(dev); |
1170 | 1170 | ||
1171 | ret = i915_gem_init(dev); |
1171 | ret = i915_gem_init(dev); |
1172 | if (ret) |
1172 | if (ret) |
1173 | goto cleanup_irq; |
1173 | goto cleanup_irq; |
1174 | 1174 | ||
1175 | 1175 | ||
1176 | intel_modeset_gem_init(dev); |
1176 | intel_modeset_gem_init(dev); |
1177 | 1177 | ||
1178 | /* Always safe in the mode setting case. */ |
1178 | /* Always safe in the mode setting case. */ |
1179 | /* FIXME: do pre/post-mode set stuff in core KMS code */ |
1179 | /* FIXME: do pre/post-mode set stuff in core KMS code */ |
1180 | dev->vblank_disable_allowed = 1; |
1180 | dev->vblank_disable_allowed = 1; |
1181 | if (INTEL_INFO(dev)->num_pipes == 0) |
1181 | if (INTEL_INFO(dev)->num_pipes == 0) |
1182 | return 0; |
1182 | return 0; |
1183 | 1183 | ||
1184 | ret = intel_fbdev_init(dev); |
1184 | ret = intel_fbdev_init(dev); |
1185 | if (ret) |
1185 | if (ret) |
1186 | goto cleanup_gem; |
1186 | goto cleanup_gem; |
1187 | 1187 | ||
1188 | /* Only enable hotplug handling once the fbdev is fully set up. */ |
1188 | /* Only enable hotplug handling once the fbdev is fully set up. */ |
1189 | intel_hpd_init(dev); |
1189 | intel_hpd_init(dev); |
1190 | 1190 | ||
1191 | /* |
1191 | /* |
1192 | * Some ports require correctly set-up hpd registers for detection to |
1192 | * Some ports require correctly set-up hpd registers for detection to |
1193 | * work properly (otherwise we get a ghost connected connector status), e.g. VGA |
1193 | * work properly (otherwise we get a ghost connected connector status), e.g. VGA |
1194 | * on gm45. Hence we can only set up the initial fbdev config after hpd |
1194 | * on gm45. Hence we can only set up the initial fbdev config after hpd |
1195 | * irqs are fully enabled. Now we should scan for the initial config |
1195 | * irqs are fully enabled. Now we should scan for the initial config |
1196 | * only once hotplug handling is enabled, but due to screwed-up locking |
1196 | * only once hotplug handling is enabled, but due to screwed-up locking |
1197 | * around kms/fbdev init we can't protect the fbdev initial config |
1197 | * around kms/fbdev init we can't protect the fbdev initial config |
1198 | * scanning against hotplug events. Hence do this first and ignore the |
1198 | * scanning against hotplug events. Hence do this first and ignore the |
1199 | * tiny window where we will lose hotplug notifications. |
1199 | * tiny window where we will lose hotplug notifications. |
1200 | */ |
1200 | */ |
1201 | intel_fbdev_initial_config(dev); |
1201 | intel_fbdev_initial_config(dev); |
1202 | 1202 | ||
1203 | /* Only enable hotplug handling once the fbdev is fully set up. */ |
1203 | /* Only enable hotplug handling once the fbdev is fully set up. */ |
1204 | dev_priv->enable_hotplug_processing = true; |
1204 | dev_priv->enable_hotplug_processing = true; |
1205 | 1205 | ||
1206 | drm_kms_helper_poll_init(dev); |
1206 | drm_kms_helper_poll_init(dev); |
1207 | 1207 | ||
1208 | return 0; |
1208 | return 0; |
1209 | 1209 | ||
1210 | cleanup_gem: |
1210 | cleanup_gem: |
1211 | mutex_lock(&dev->struct_mutex); |
1211 | mutex_lock(&dev->struct_mutex); |
1212 | i915_gem_cleanup_ringbuffer(dev); |
1212 | i915_gem_cleanup_ringbuffer(dev); |
1213 | i915_gem_context_fini(dev); |
1213 | i915_gem_context_fini(dev); |
1214 | mutex_unlock(&dev->struct_mutex); |
1214 | mutex_unlock(&dev->struct_mutex); |
1215 | i915_gem_cleanup_aliasing_ppgtt(dev); |
1215 | i915_gem_cleanup_aliasing_ppgtt(dev); |
1216 | cleanup_irq: |
1216 | cleanup_irq: |
1217 | // drm_irq_uninstall(dev); |
1217 | // drm_irq_uninstall(dev); |
1218 | cleanup_gem_stolen: |
1218 | cleanup_gem_stolen: |
1219 | // i915_gem_cleanup_stolen(dev); |
1219 | // i915_gem_cleanup_stolen(dev); |
1220 | cleanup_vga_switcheroo: |
1220 | cleanup_vga_switcheroo: |
1221 | // vga_switcheroo_unregister_client(dev->pdev); |
1221 | // vga_switcheroo_unregister_client(dev->pdev); |
1222 | cleanup_vga_client: |
1222 | cleanup_vga_client: |
1223 | // vga_client_register(dev->pdev, NULL, NULL, NULL); |
1223 | // vga_client_register(dev->pdev, NULL, NULL, NULL); |
1224 | out: |
1224 | out: |
1225 | return ret; |
1225 | return ret; |
1226 | } |
1226 | } |
1227 | 1227 | ||
1228 | 1228 | ||
1229 | 1229 | ||
1230 | 1230 | ||
1231 | static void i915_dump_device_info(struct drm_i915_private *dev_priv) |
1231 | static void i915_dump_device_info(struct drm_i915_private *dev_priv) |
1232 | { |
1232 | { |
1233 | const struct intel_device_info *info = dev_priv->info; |
1233 | const struct intel_device_info *info = dev_priv->info; |
1234 | 1234 | ||
1235 | #define PRINT_S(name) "%s" |
1235 | #define PRINT_S(name) "%s" |
1236 | #define SEP_EMPTY |
1236 | #define SEP_EMPTY |
1237 | #define PRINT_FLAG(name) info->name ? #name "," : "" |
1237 | #define PRINT_FLAG(name) info->name ? #name "," : "" |
1238 | #define SEP_COMMA , |
1238 | #define SEP_COMMA , |
1239 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" |
1239 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" |
1240 | DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), |
1240 | DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), |
1241 | info->gen, |
1241 | info->gen, |
1242 | dev_priv->dev->pdev->device, |
1242 | dev_priv->dev->pdev->device, |
1243 | DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); |
1243 | DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); |
1244 | #undef PRINT_S |
1244 | #undef PRINT_S |
1245 | #undef SEP_EMPTY |
1245 | #undef SEP_EMPTY |
1246 | #undef PRINT_FLAG |
1246 | #undef PRINT_FLAG |
1247 | #undef SEP_COMMA |
1247 | #undef SEP_COMMA |
1248 | } |
1248 | } |
1249 | 1249 | ||
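The PRINT_S/PRINT_FLAG pair above is an X-macro trick: the same DEV_INFO_FOR_EACH_FLAG list is expanded twice, once to build the format string and once to build the matching argument list, so the flag list only has to be maintained in one place. A stand-alone toy version of the pattern; FEATURE_LIST, toy_info and the three flags are made up for illustration.

#include <stdio.h>

/* Hypothetical X-macro listing three feature flags. */
#define FEATURE_LIST(F, SEP)  F(has_llc) SEP F(has_blt) SEP F(is_mobile)

struct toy_info { int has_llc, has_blt, is_mobile; };

#define PRINT_S(name)     "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name)  (info.name ? #name "," : "")
#define SEP_COMMA         ,

int main(void)
{
        struct toy_info info = { .has_llc = 1, .is_mobile = 1 };

        /* Expands to: printf("flags=" "%s" "%s" "%s" "\n", (...), (...), (...)); */
        printf("flags=" FEATURE_LIST(PRINT_S, SEP_EMPTY) "\n",
               FEATURE_LIST(PRINT_FLAG, SEP_COMMA));
        return 0;
}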
1250 | /** |
1250 | /** |
1251 | * i915_driver_load - setup chip and create an initial config |
1251 | * i915_driver_load - setup chip and create an initial config |
1252 | * @dev: DRM device |
1252 | * @dev: DRM device |
1253 | * @flags: startup flags |
1253 | * @flags: startup flags |
1254 | * |
1254 | * |
1255 | * The driver load routine has to do several things: |
1255 | * The driver load routine has to do several things: |
1256 | * - drive output discovery via intel_modeset_init() |
1256 | * - drive output discovery via intel_modeset_init() |
1257 | * - initialize the memory manager |
1257 | * - initialize the memory manager |
1258 | * - allocate initial config memory |
1258 | * - allocate initial config memory |
1259 | * - setup the DRM framebuffer with the allocated memory |
1259 | * - setup the DRM framebuffer with the allocated memory |
1260 | */ |
1260 | */ |
1261 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
1261 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
1262 | { |
1262 | { |
1263 | struct drm_i915_private *dev_priv; |
1263 | struct drm_i915_private *dev_priv; |
1264 | struct intel_device_info *info; |
1264 | struct intel_device_info *info; |
1265 | int ret = 0, mmio_bar, mmio_size; |
1265 | int ret = 0, mmio_bar, mmio_size; |
1266 | uint32_t aperture_size; |
1266 | uint32_t aperture_size; |
1267 | 1267 | ||
1268 | info = (struct intel_device_info *) flags; |
1268 | info = (struct intel_device_info *) flags; |
1269 | 1269 | ||
1270 | 1270 | ||
1271 | dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); |
1271 | dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); |
1272 | if (dev_priv == NULL) |
1272 | if (dev_priv == NULL) |
1273 | return -ENOMEM; |
1273 | return -ENOMEM; |
1274 | 1274 | ||
1275 | dev->dev_private = (void *)dev_priv; |
1275 | dev->dev_private = (void *)dev_priv; |
1276 | dev_priv->dev = dev; |
1276 | dev_priv->dev = dev; |
1277 | dev_priv->info = info; |
1277 | dev_priv->info = info; |
1278 | 1278 | ||
1279 | spin_lock_init(&dev_priv->irq_lock); |
1279 | spin_lock_init(&dev_priv->irq_lock); |
1280 | spin_lock_init(&dev_priv->gpu_error.lock); |
1280 | spin_lock_init(&dev_priv->gpu_error.lock); |
1281 | spin_lock_init(&dev_priv->backlight.lock); |
1281 | spin_lock_init(&dev_priv->backlight.lock); |
1282 | spin_lock_init(&dev_priv->uncore.lock); |
1282 | spin_lock_init(&dev_priv->uncore.lock); |
1283 | spin_lock_init(&dev_priv->mm.object_stat_lock); |
1283 | spin_lock_init(&dev_priv->mm.object_stat_lock); |
1284 | mutex_init(&dev_priv->dpio_lock); |
1284 | mutex_init(&dev_priv->dpio_lock); |
1285 | mutex_init(&dev_priv->rps.hw_lock); |
1285 | mutex_init(&dev_priv->rps.hw_lock); |
1286 | mutex_init(&dev_priv->modeset_restore_lock); |
1286 | mutex_init(&dev_priv->modeset_restore_lock); |
1287 | 1287 | ||
1288 | mutex_init(&dev_priv->pc8.lock); |
1288 | mutex_init(&dev_priv->pc8.lock); |
1289 | dev_priv->pc8.requirements_met = false; |
1289 | dev_priv->pc8.requirements_met = false; |
1290 | dev_priv->pc8.gpu_idle = false; |
1290 | dev_priv->pc8.gpu_idle = false; |
1291 | dev_priv->pc8.irqs_disabled = false; |
1291 | dev_priv->pc8.irqs_disabled = false; |
1292 | dev_priv->pc8.enabled = false; |
1292 | dev_priv->pc8.enabled = false; |
1293 | dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ |
1293 | dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ |
1294 | INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); |
1294 | INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); |
1295 | 1295 | ||
1296 | i915_dump_device_info(dev_priv); |
1296 | i915_dump_device_info(dev_priv); |
1297 | 1297 | ||
1298 | /* Not all pre-production machines fall into this category, only the |
1298 | /* Not all pre-production machines fall into this category, only the |
1299 | * very first ones. Almost everything should work, except for maybe |
1299 | * very first ones. Almost everything should work, except for maybe |
1300 | * suspend/resume. And we don't implement workarounds that affect only |
1300 | * suspend/resume. And we don't implement workarounds that affect only |
1301 | * pre-production machines. */ |
1301 | * pre-production machines. */ |
1302 | if (IS_HSW_EARLY_SDV(dev)) |
1302 | if (IS_HSW_EARLY_SDV(dev)) |
1303 | DRM_INFO("This is an early pre-production Haswell machine. " |
1303 | DRM_INFO("This is an early pre-production Haswell machine. " |
1304 | "It may not be fully functional.\n"); |
1304 | "It may not be fully functional.\n"); |
1305 | 1305 | ||
1306 | if (i915_get_bridge_dev(dev)) { |
1306 | if (i915_get_bridge_dev(dev)) { |
1307 | ret = -EIO; |
1307 | ret = -EIO; |
1308 | goto free_priv; |
1308 | goto free_priv; |
1309 | } |
1309 | } |
1310 | 1310 | ||
1311 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
1311 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
1312 | /* Before gen4, the registers and the GTT are behind different BARs. |
1312 | /* Before gen4, the registers and the GTT are behind different BARs. |
1313 | * However, from gen4 onwards, the registers and the GTT are shared |
1313 | * However, from gen4 onwards, the registers and the GTT are shared |
1314 | * in the same BAR, so we want to restrict this ioremap from |
1314 | * in the same BAR, so we want to restrict this ioremap from |
1315 | * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately, |
1315 | * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately, |
1316 | * the register BAR remains the same size for all the earlier |
1316 | * the register BAR remains the same size for all the earlier |
1317 | * generations up to Ironlake. |
1317 | * generations up to Ironlake. |
1318 | */ |
1318 | */ |
1319 | if (info->gen < 5) |
1319 | if (info->gen < 5) |
1320 | mmio_size = 512*1024; |
1320 | mmio_size = 512*1024; |
1321 | else |
1321 | else |
1322 | mmio_size = 2*1024*1024; |
1322 | mmio_size = 2*1024*1024; |
1323 | 1323 | ||
1324 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); |
1324 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); |
1325 | if (!dev_priv->regs) { |
1325 | if (!dev_priv->regs) { |
1326 | DRM_ERROR("failed to map registers\n"); |
1326 | DRM_ERROR("failed to map registers\n"); |
1327 | ret = -EIO; |
1327 | ret = -EIO; |
1328 | goto put_bridge; |
1328 | goto put_bridge; |
1329 | } |
1329 | } |
1330 | 1330 | ||
1331 | intel_uncore_early_sanitize(dev); |
1331 | intel_uncore_early_sanitize(dev); |
1332 | 1332 | ||
1333 | if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { |
1333 | if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { |
1334 | /* The docs do not explain exactly how the calculation can be |
1334 | /* The docs do not explain exactly how the calculation can be |
1335 | * made. It is somewhat guessable, but for now, it's always |
1335 | * made. It is somewhat guessable, but for now, it's always |
1336 | * 128MB. |
1336 | * 128MB. |
1337 | * NB: We can't write IDICR yet because we do not have gt funcs |
1337 | * NB: We can't write IDICR yet because we do not have gt funcs |
1338 | * set up */ |
1338 | * set up */ |
1339 | dev_priv->ellc_size = 128; |
1339 | dev_priv->ellc_size = 128; |
1340 | DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); |
1340 | DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); |
1341 | } |
1341 | } |
1342 | 1342 | ||
1343 | ret = i915_gem_gtt_init(dev); |
1343 | ret = i915_gem_gtt_init(dev); |
1344 | if (ret) |
1344 | if (ret) |
1345 | goto put_bridge; |
1345 | goto put_bridge; |
1346 | 1346 | ||
1347 | 1347 | ||
1348 | pci_set_master(dev->pdev); |
1348 | pci_set_master(dev->pdev); |
1349 | 1349 | ||
1350 | /* overlay on gen2 is broken and can't address above 1G */ |
1350 | /* overlay on gen2 is broken and can't address above 1G */ |
1351 | 1351 | ||
1352 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) |
1352 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) |
1353 | * using 32bit addressing, overwriting memory if HWS is located |
1353 | * using 32bit addressing, overwriting memory if HWS is located |
1354 | * above 4GB. |
1354 | * above 4GB. |
1355 | * |
1355 | * |
1356 | * The documentation also mentions an issue with undefined |
1356 | * The documentation also mentions an issue with undefined |
1357 | * behaviour if any general state is accessed within a page above 4GB, |
1357 | * behaviour if any general state is accessed within a page above 4GB, |
1358 | * which also needs to be handled carefully. |
1358 | * which also needs to be handled carefully. |
1359 | */ |
1359 | */ |
1360 | 1360 | ||
1361 | aperture_size = dev_priv->gtt.mappable_end; |
1361 | aperture_size = dev_priv->gtt.mappable_end; |
- | 1362 | ||
- | 1363 | dev_priv->gtt.mappable = AllocKernelSpace(8192); |
- | 1364 | if (dev_priv->gtt.mappable == NULL) { |
- | 1365 | ret = -EIO; |
- | 1366 | goto out_rmmap; |
1362 | 1367 | } |
1363 | 1368 | ||
1364 | /* The i915 workqueue is primarily used for batched retirement of |
1369 | /* The i915 workqueue is primarily used for batched retirement of |
1365 | * requests (and thus managing bo) once the task has been completed |
1370 | * requests (and thus managing bo) once the task has been completed |
1366 | * by the GPU. i915_gem_retire_requests() is called directly when we |
1371 | * by the GPU. i915_gem_retire_requests() is called directly when we |
1367 | * need high-priority retirement, such as waiting for an explicit |
1372 | * need high-priority retirement, such as waiting for an explicit |
1368 | * bo. |
1373 | * bo. |
1369 | * |
1374 | * |
1370 | * It is also used for periodic low-priority events, such as |
1375 | * It is also used for periodic low-priority events, such as |
1371 | * idle-timers and recording error state. |
1376 | * idle-timers and recording error state. |
1372 | * |
1377 | * |
1373 | * All tasks on the workqueue are expected to acquire the dev mutex |
1378 | * All tasks on the workqueue are expected to acquire the dev mutex |
1374 | * so there is no point in running more than one instance of the |
1379 | * so there is no point in running more than one instance of the |
1375 | * workqueue at any time. Use an ordered one. |
1380 | * workqueue at any time. Use an ordered one. |
1376 | */ |
1381 | */ |
1377 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); |
1382 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); |
1378 | if (dev_priv->wq == NULL) { |
1383 | if (dev_priv->wq == NULL) { |
1379 | DRM_ERROR("Failed to create our workqueue.\n"); |
1384 | DRM_ERROR("Failed to create our workqueue.\n"); |
1380 | ret = -ENOMEM; |
1385 | ret = -ENOMEM; |
1381 | goto out_mtrrfree; |
1386 | goto out_mtrrfree; |
1382 | } |
1387 | } |
1383 | system_wq = dev_priv->wq; |
1388 | system_wq = dev_priv->wq; |
1384 | 1389 | ||
1385 | /* This must be called before any calls to HAS_PCH_* */ |
1390 | /* This must be called before any calls to HAS_PCH_* */ |
1386 | intel_detect_pch(dev); |
1391 | intel_detect_pch(dev); |
1387 | 1392 | ||
1388 | intel_irq_init(dev); |
1393 | intel_irq_init(dev); |
1389 | intel_pm_init(dev); |
1394 | intel_pm_init(dev); |
1390 | intel_uncore_sanitize(dev); |
1395 | intel_uncore_sanitize(dev); |
1391 | intel_uncore_init(dev); |
1396 | intel_uncore_init(dev); |
1392 | 1397 | ||
1393 | /* Try to make sure MCHBAR is enabled before poking at it */ |
1398 | /* Try to make sure MCHBAR is enabled before poking at it */ |
1394 | intel_setup_mchbar(dev); |
1399 | intel_setup_mchbar(dev); |
1395 | intel_setup_gmbus(dev); |
1400 | intel_setup_gmbus(dev); |
1396 | intel_opregion_setup(dev); |
1401 | intel_opregion_setup(dev); |
1397 | 1402 | ||
1398 | intel_setup_bios(dev); |
1403 | intel_setup_bios(dev); |
1399 | 1404 | ||
1400 | i915_gem_load(dev); |
1405 | i915_gem_load(dev); |
1401 | 1406 | ||
1402 | /* On the 945G/GM, the chipset reports the MSI capability on the |
1407 | /* On the 945G/GM, the chipset reports the MSI capability on the |
1403 | * integrated graphics even though the support isn't actually there |
1408 | * integrated graphics even though the support isn't actually there |
1404 | * according to the published specs. It doesn't appear to function |
1409 | * according to the published specs. It doesn't appear to function |
1405 | * correctly in testing on 945G. |
1410 | * correctly in testing on 945G. |
1406 | * This may be a side effect of MSI having been made available for PEG |
1411 | * This may be a side effect of MSI having been made available for PEG |
1407 | * and the registers being closely associated. |
1412 | * and the registers being closely associated. |
1408 | * |
1413 | * |
1409 | * According to chipset errata, on the 965GM, MSI interrupts may |
1414 | * According to chipset errata, on the 965GM, MSI interrupts may |
1410 | * be lost or delayed, but we use them anyway to avoid |
1415 | * be lost or delayed, but we use them anyway to avoid |
1411 | * stuck interrupts on some machines. |
1416 | * stuck interrupts on some machines. |
1412 | */ |
1417 | */ |
1413 | 1418 | ||
1414 | dev_priv->num_plane = 1; |
1419 | dev_priv->num_plane = 1; |
1415 | if (IS_VALLEYVIEW(dev)) |
1420 | if (IS_VALLEYVIEW(dev)) |
1416 | dev_priv->num_plane = 2; |
1421 | dev_priv->num_plane = 2; |
1417 | 1422 | ||
1418 | if (HAS_POWER_WELL(dev)) |
1423 | if (HAS_POWER_WELL(dev)) |
1419 | i915_init_power_well(dev); |
1424 | i915_init_power_well(dev); |
1420 | 1425 | ||
1421 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1426 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1422 | ret = i915_load_modeset_init(dev); |
1427 | ret = i915_load_modeset_init(dev); |
1423 | if (ret < 0) { |
1428 | if (ret < 0) { |
1424 | DRM_ERROR("failed to init modeset\n"); |
1429 | DRM_ERROR("failed to init modeset\n"); |
1425 | goto out_gem_unload; |
1430 | goto out_gem_unload; |
1426 | } |
1431 | } |
1427 | } else { |
1432 | } else { |
1428 | /* Start out suspended in ums mode. */ |
1433 | /* Start out suspended in ums mode. */ |
1429 | dev_priv->ums.mm_suspended = 1; |
1434 | dev_priv->ums.mm_suspended = 1; |
1430 | } |
1435 | } |
1431 | 1436 | ||
1432 | 1437 | ||
1433 | if (INTEL_INFO(dev)->num_pipes) { |
1438 | if (INTEL_INFO(dev)->num_pipes) { |
1434 | /* Must be done after probing outputs */ |
1439 | /* Must be done after probing outputs */ |
1435 | intel_opregion_init(dev); |
1440 | intel_opregion_init(dev); |
1436 | // acpi_video_register(); |
1441 | // acpi_video_register(); |
1437 | } |
1442 | } |
1438 | 1443 | ||
1439 | if (IS_GEN5(dev)) |
1444 | if (IS_GEN5(dev)) |
1440 | intel_gpu_ips_init(dev_priv); |
1445 | intel_gpu_ips_init(dev_priv); |
1441 | 1446 | ||
1442 | main_device = dev; |
1447 | main_device = dev; |
1443 | 1448 | ||
1444 | return 0; |
1449 | return 0; |
1445 | 1450 | ||
1446 | out_gem_unload: |
1451 | out_gem_unload: |
1447 | // if (dev_priv->mm.inactive_shrinker.shrink) |
1452 | // if (dev_priv->mm.inactive_shrinker.shrink) |
1448 | // unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
1453 | // unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
1449 | 1454 | ||
1450 | // if (dev->pdev->msi_enabled) |
1455 | // if (dev->pdev->msi_enabled) |
1451 | // pci_disable_msi(dev->pdev); |
1456 | // pci_disable_msi(dev->pdev); |
1452 | 1457 | ||
1453 | // intel_teardown_gmbus(dev); |
1458 | // intel_teardown_gmbus(dev); |
1454 | // intel_teardown_mchbar(dev); |
1459 | // intel_teardown_mchbar(dev); |
1455 | // destroy_workqueue(dev_priv->wq); |
1460 | // destroy_workqueue(dev_priv->wq); |
1456 | out_mtrrfree: |
1461 | out_mtrrfree: |
1457 | // arch_phys_wc_del(dev_priv->mm.gtt_mtrr); |
1462 | // arch_phys_wc_del(dev_priv->mm.gtt_mtrr); |
1458 | // io_mapping_free(dev_priv->gtt.mappable); |
1463 | // io_mapping_free(dev_priv->gtt.mappable); |
1459 | // dev_priv->gtt.gtt_remove(dev); |
1464 | // dev_priv->gtt.gtt_remove(dev); |
1460 | out_rmmap: |
1465 | out_rmmap: |
1461 | pci_iounmap(dev->pdev, dev_priv->regs); |
1466 | pci_iounmap(dev->pdev, dev_priv->regs); |
1462 | put_bridge: |
1467 | put_bridge: |
1463 | // pci_dev_put(dev_priv->bridge_dev); |
1468 | // pci_dev_put(dev_priv->bridge_dev); |
1464 | free_priv: |
1469 | free_priv: |
1465 | kfree(dev_priv); |
1470 | kfree(dev_priv); |
1466 | return ret; |
1471 | return ret; |
1467 | } |
1472 | } |
1468 | 1473 | ||
1469 | #if 0 |
1474 | #if 0 |
1470 | 1475 | ||
1471 | int i915_driver_unload(struct drm_device *dev) |
1476 | int i915_driver_unload(struct drm_device *dev) |
1472 | { |
1477 | { |
1473 | struct drm_i915_private *dev_priv = dev->dev_private; |
1478 | struct drm_i915_private *dev_priv = dev->dev_private; |
1474 | int ret; |
1479 | int ret; |
1475 | 1480 | ||
1476 | intel_gpu_ips_teardown(); |
1481 | intel_gpu_ips_teardown(); |
1477 | 1482 | ||
1478 | if (HAS_POWER_WELL(dev)) { |
1483 | if (HAS_POWER_WELL(dev)) { |
1479 | /* The i915.ko module is still not prepared to be loaded when |
1484 | /* The i915.ko module is still not prepared to be loaded when |
1480 | * the power well is not enabled, so just enable it in case |
1485 | * the power well is not enabled, so just enable it in case |
1481 | * we're going to unload/reload. */ |
1486 | * we're going to unload/reload. */ |
1482 | intel_set_power_well(dev, true); |
1487 | intel_set_power_well(dev, true); |
1483 | i915_remove_power_well(dev); |
1488 | i915_remove_power_well(dev); |
1484 | } |
1489 | } |
1485 | 1490 | ||
1486 | i915_teardown_sysfs(dev); |
1491 | i915_teardown_sysfs(dev); |
1487 | 1492 | ||
1488 | if (dev_priv->mm.inactive_shrinker.scan_objects) |
1493 | if (dev_priv->mm.inactive_shrinker.scan_objects) |
1489 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
1494 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
1490 | 1495 | ||
1491 | mutex_lock(&dev->struct_mutex); |
1496 | mutex_lock(&dev->struct_mutex); |
1492 | ret = i915_gpu_idle(dev); |
1497 | ret = i915_gpu_idle(dev); |
1493 | if (ret) |
1498 | if (ret) |
1494 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
1499 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
1495 | i915_gem_retire_requests(dev); |
1500 | i915_gem_retire_requests(dev); |
1496 | mutex_unlock(&dev->struct_mutex); |
1501 | mutex_unlock(&dev->struct_mutex); |
1497 | 1502 | ||
1498 | /* Cancel the retire work handler, which should be idle now. */ |
1503 | /* Cancel the retire work handler, which should be idle now. */ |
1499 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); |
1504 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); |
1500 | 1505 | ||
1501 | io_mapping_free(dev_priv->gtt.mappable); |
1506 | io_mapping_free(dev_priv->gtt.mappable); |
1502 | arch_phys_wc_del(dev_priv->gtt.mtrr); |
1507 | arch_phys_wc_del(dev_priv->gtt.mtrr); |
1503 | 1508 | ||
1504 | acpi_video_unregister(); |
1509 | acpi_video_unregister(); |
1505 | 1510 | ||
1506 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1511 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1507 | intel_fbdev_fini(dev); |
1512 | intel_fbdev_fini(dev); |
1508 | intel_modeset_cleanup(dev); |
1513 | intel_modeset_cleanup(dev); |
1509 | cancel_work_sync(&dev_priv->console_resume_work); |
1514 | cancel_work_sync(&dev_priv->console_resume_work); |
1510 | 1515 | ||
1511 | /* |
1516 | /* |
1512 | * free the memory space allocated for the child device |
1517 | * free the memory space allocated for the child device |
1513 | * config parsed from VBT |
1518 | * config parsed from VBT |
1514 | */ |
1519 | */ |
1515 | if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { |
1520 | if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { |
1516 | kfree(dev_priv->vbt.child_dev); |
1521 | kfree(dev_priv->vbt.child_dev); |
1517 | dev_priv->vbt.child_dev = NULL; |
1522 | dev_priv->vbt.child_dev = NULL; |
1518 | dev_priv->vbt.child_dev_num = 0; |
1523 | dev_priv->vbt.child_dev_num = 0; |
1519 | } |
1524 | } |
1520 | 1525 | ||
1521 | vga_switcheroo_unregister_client(dev->pdev); |
1526 | vga_switcheroo_unregister_client(dev->pdev); |
1522 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
1527 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
1523 | } |
1528 | } |
1524 | 1529 | ||
1525 | /* Free error state after interrupts are fully disabled. */ |
1530 | /* Free error state after interrupts are fully disabled. */ |
1526 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
1531 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
1527 | cancel_work_sync(&dev_priv->gpu_error.work); |
1532 | cancel_work_sync(&dev_priv->gpu_error.work); |
1528 | i915_destroy_error_state(dev); |
1533 | i915_destroy_error_state(dev); |
1529 | 1534 | ||
1530 | cancel_delayed_work_sync(&dev_priv->pc8.enable_work); |
1535 | cancel_delayed_work_sync(&dev_priv->pc8.enable_work); |
1531 | 1536 | ||
1532 | if (dev->pdev->msi_enabled) |
1537 | if (dev->pdev->msi_enabled) |
1533 | pci_disable_msi(dev->pdev); |
1538 | pci_disable_msi(dev->pdev); |
1534 | 1539 | ||
1535 | intel_opregion_fini(dev); |
1540 | intel_opregion_fini(dev); |
1536 | 1541 | ||
1537 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1542 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1538 | /* Flush any outstanding unpin_work. */ |
1543 | /* Flush any outstanding unpin_work. */ |
1539 | flush_workqueue(dev_priv->wq); |
1544 | flush_workqueue(dev_priv->wq); |
1540 | 1545 | ||
1541 | mutex_lock(&dev->struct_mutex); |
1546 | mutex_lock(&dev->struct_mutex); |
1542 | i915_gem_free_all_phys_object(dev); |
1547 | i915_gem_free_all_phys_object(dev); |
1543 | i915_gem_cleanup_ringbuffer(dev); |
1548 | i915_gem_cleanup_ringbuffer(dev); |
1544 | i915_gem_context_fini(dev); |
1549 | i915_gem_context_fini(dev); |
1545 | mutex_unlock(&dev->struct_mutex); |
1550 | mutex_unlock(&dev->struct_mutex); |
1546 | i915_gem_cleanup_aliasing_ppgtt(dev); |
1551 | i915_gem_cleanup_aliasing_ppgtt(dev); |
1547 | i915_gem_cleanup_stolen(dev); |
1552 | i915_gem_cleanup_stolen(dev); |
1548 | 1553 | ||
1549 | if (!I915_NEED_GFX_HWS(dev)) |
1554 | if (!I915_NEED_GFX_HWS(dev)) |
1550 | i915_free_hws(dev); |
1555 | i915_free_hws(dev); |
1551 | } |
1556 | } |
1552 | 1557 | ||
1553 | list_del(&dev_priv->gtt.base.global_link); |
1558 | list_del(&dev_priv->gtt.base.global_link); |
1554 | WARN_ON(!list_empty(&dev_priv->vm_list)); |
1559 | WARN_ON(!list_empty(&dev_priv->vm_list)); |
1555 | drm_mm_takedown(&dev_priv->gtt.base.mm); |
1560 | drm_mm_takedown(&dev_priv->gtt.base.mm); |
1556 | if (dev_priv->regs != NULL) |
1561 | if (dev_priv->regs != NULL) |
1557 | pci_iounmap(dev->pdev, dev_priv->regs); |
1562 | pci_iounmap(dev->pdev, dev_priv->regs); |
1558 | 1563 | ||
1559 | intel_teardown_gmbus(dev); |
1564 | intel_teardown_gmbus(dev); |
1560 | intel_teardown_mchbar(dev); |
1565 | intel_teardown_mchbar(dev); |
1561 | 1566 | ||
1562 | destroy_workqueue(dev_priv->wq); |
1567 | destroy_workqueue(dev_priv->wq); |
1563 | pm_qos_remove_request(&dev_priv->pm_qos); |
1568 | pm_qos_remove_request(&dev_priv->pm_qos); |
1564 | 1569 | ||
1565 | dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); |
1570 | dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); |
1566 | 1571 | ||
1567 | if (dev_priv->slab) |
1572 | if (dev_priv->slab) |
1568 | kmem_cache_destroy(dev_priv->slab); |
1573 | kmem_cache_destroy(dev_priv->slab); |
1569 | 1574 | ||
1570 | pci_dev_put(dev_priv->bridge_dev); |
1575 | pci_dev_put(dev_priv->bridge_dev); |
1571 | kfree(dev->dev_private); |
1576 | kfree(dev->dev_private); |
1572 | 1577 | ||
1573 | return 0; |
1578 | return 0; |
1574 | } |
1579 | } |
1575 | #endif |
1580 | #endif |
1576 | 1581 | ||
1577 | int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
1582 | int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
1578 | { |
1583 | { |
1579 | struct drm_i915_file_private *file_priv; |
1584 | struct drm_i915_file_private *file_priv; |
1580 | 1585 | ||
1581 | DRM_DEBUG_DRIVER("\n"); |
1586 | DRM_DEBUG_DRIVER("\n"); |
1582 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); |
1587 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); |
1583 | if (!file_priv) |
1588 | if (!file_priv) |
1584 | return -ENOMEM; |
1589 | return -ENOMEM; |
1585 | 1590 | ||
1586 | file->driver_priv = file_priv; |
1591 | file->driver_priv = file_priv; |
1587 | 1592 | ||
1588 | spin_lock_init(&file_priv->mm.lock); |
1593 | spin_lock_init(&file_priv->mm.lock); |
1589 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
1594 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
1590 | 1595 | ||
1591 | idr_init(&file_priv->context_idr); |
1596 | idr_init(&file_priv->context_idr); |
1592 | 1597 | ||
1593 | return 0; |
1598 | return 0; |
1594 | } |
1599 | } |
1595 | 1600 | ||
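The context_idr initialised above is the per-file ID allocator that the context-create ioctl (listed in the table further down) hands IDs out of. A hedged userspace sketch of creating a hardware context; the helper name and fd are illustrative, and id 0 is used as a failure sentinel since it denotes the default context.

#include <xf86drm.h>
#include <i915_drm.h>

/* Create a hardware context; returns its id, or 0 on failure. */
static unsigned int create_context(int drm_fd)
{
        struct drm_i915_gem_context_create create = { 0 };

        if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
                return 0;
        return create.ctx_id;   /* id allocated from the file's context_idr */
}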
1596 | #if 0 |
1601 | #if 0 |
1597 | /** |
1602 | /** |
1598 | * i915_driver_lastclose - clean up after all DRM clients have exited |
1603 | * i915_driver_lastclose - clean up after all DRM clients have exited |
1599 | * @dev: DRM device |
1604 | * @dev: DRM device |
1600 | * |
1605 | * |
1601 | * Take care of cleaning up after all DRM clients have exited. In the |
1606 | * Take care of cleaning up after all DRM clients have exited. In the |
1602 | * mode setting case, we want to restore the kernel's initial mode (just |
1607 | * mode setting case, we want to restore the kernel's initial mode (just |
1603 | * in case the last client left us in a bad state). |
1608 | * in case the last client left us in a bad state). |
1604 | * |
1609 | * |
1605 | * Additionally, in the non-mode setting case, we'll tear down the GTT |
1610 | * Additionally, in the non-mode setting case, we'll tear down the GTT |
1606 | * and DMA structures, since the kernel won't be using them, and clean |
1611 | * and DMA structures, since the kernel won't be using them, and clean |
1607 | * up any GEM state. |
1612 | * up any GEM state. |
1608 | */ |
1613 | */ |
1609 | void i915_driver_lastclose(struct drm_device * dev) |
1614 | void i915_driver_lastclose(struct drm_device * dev) |
1610 | { |
1615 | { |
1611 | drm_i915_private_t *dev_priv = dev->dev_private; |
1616 | drm_i915_private_t *dev_priv = dev->dev_private; |
1612 | 1617 | ||
1613 | /* On gen6+ we refuse to init without kms enabled, but then the drm core |
1618 | /* On gen6+ we refuse to init without kms enabled, but then the drm core |
1614 | * goes right around and calls lastclose. Check for this and don't clean |
1619 | * goes right around and calls lastclose. Check for this and don't clean |
1615 | * up anything. */ |
1620 | * up anything. */ |
1616 | if (!dev_priv) |
1621 | if (!dev_priv) |
1617 | return; |
1622 | return; |
1618 | 1623 | ||
1619 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1624 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1620 | intel_fb_restore_mode(dev); |
1625 | intel_fb_restore_mode(dev); |
1621 | vga_switcheroo_process_delayed_switch(); |
1626 | vga_switcheroo_process_delayed_switch(); |
1622 | return; |
1627 | return; |
1623 | } |
1628 | } |
1624 | 1629 | ||
1625 | i915_gem_lastclose(dev); |
1630 | i915_gem_lastclose(dev); |
1626 | 1631 | ||
1627 | i915_dma_cleanup(dev); |
1632 | i915_dma_cleanup(dev); |
1628 | } |
1633 | } |
1629 | 1634 | ||
1630 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) |
1635 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) |
1631 | { |
1636 | { |
1632 | i915_gem_context_close(dev, file_priv); |
1637 | i915_gem_context_close(dev, file_priv); |
1633 | i915_gem_release(dev, file_priv); |
1638 | i915_gem_release(dev, file_priv); |
1634 | } |
1639 | } |
1635 | 1640 | ||
1636 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
1641 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
1637 | { |
1642 | { |
1638 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1643 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1639 | 1644 | ||
1640 | kfree(file_priv); |
1645 | kfree(file_priv); |
1641 | } |
1646 | } |
1642 | 1647 | ||
1643 | const struct drm_ioctl_desc i915_ioctls[] = { |
1648 | const struct drm_ioctl_desc i915_ioctls[] = { |
1644 | DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1649 | DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1645 | DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), |
1650 | DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), |
1646 | DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), |
1651 | DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), |
1647 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), |
1652 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), |
1648 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), |
1653 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), |
1649 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), |
1654 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), |
1650 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), |
1655 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), |
1651 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1656 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1652 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), |
1657 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), |
1653 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), |
1658 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), |
1654 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1659 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1655 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), |
1660 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), |
1656 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1661 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1657 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1662 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1658 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), |
1663 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), |
1659 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), |
1664 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), |
1660 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1665 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1661 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1666 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1662 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), |
1667 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), |
1663 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1668 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1664 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1669 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1665 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1670 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1666 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1671 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1667 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1672 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1668 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1673 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1669 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1674 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1670 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1675 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1671 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1676 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1672 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1677 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1673 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1678 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1674 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1679 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1675 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1680 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1676 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1681 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1677 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1682 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1678 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1683 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1679 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1684 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1680 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1685 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1681 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1686 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1682 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), |
1687 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), |
1683 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1688 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1684 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1689 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1685 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1690 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1686 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1691 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1687 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1692 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
1688 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1693 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1689 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1694 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1690 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1695 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1691 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1696 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
1692 | }; |
1697 | }; |
1693 | 1698 | ||
1694 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
1699 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
1695 | 1700 | ||
1696 | /* |
1701 | /* |
1697 | * This is really ugly: Because old userspace abused the linux agp interface to |
1702 | * This is really ugly: Because old userspace abused the linux agp interface to |
1698 | * manage the gtt, we need to claim that all intel devices are agp; |
1703 | * manage the gtt, we need to claim that all intel devices are agp; |
1699 | * otherwise the drm core refuses to initialize the agp support code. |
1704 | * otherwise the drm core refuses to initialize the agp support code. |
1700 | */ |
1705 | */ |
1701 | int i915_driver_device_is_agp(struct drm_device * dev) |
1706 | int i915_driver_device_is_agp(struct drm_device * dev) |
1702 | { |
1707 | { |
1703 | return 1; |
1708 | return 1; |
1704 | } |
1709 | } |
1705 | #endif |
1710 | #endif |