/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Zou Nan hai
 *    Xiang Hai hao
 *
 */
#define iowrite32(v, addr)      writel((v), (addr))
#define ioread32(addr)          readl(addr)

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
//#include "i915_trace.h"
#include "intel_drv.h"

static inline int ring_space(struct intel_ring_buffer *ring)
{
    int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
    if (space < 0)
        space += ring->size;
    return space;
}
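
/*
 * Quick sanity check of the arithmetic above (illustrative numbers, not
 * taken from the driver): on a 4096-byte ring with head == 1024 and
 * tail == 2048, space = 1024 - (2048 + 8) = -1032, which wraps to
 * -1032 + 4096 = 3064 free bytes.  The extra 8 bytes keep tail from
 * ever catching head exactly, so a full ring stays distinguishable
 * from an empty one.
 */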

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u32 seqno;

    seqno = dev_priv->next_seqno;

    /* reserve 0 for non-seqno */
    if (++dev_priv->next_seqno == 0)
        dev_priv->next_seqno = 1;

    return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
                  u32 invalidate_domains,
                  u32 flush_domains)
{
    struct drm_device *dev = ring->dev;
    u32 cmd;
    int ret;

    /*
     * read/write caches:
     *
     * I915_GEM_DOMAIN_RENDER is always invalidated, but is
     * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
     * also flushed at 2d versus 3d pipeline switches.
     *
     * read-only caches:
     *
     * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
     * MI_READ_FLUSH is set, and is always flushed on 965.
     *
     * I915_GEM_DOMAIN_COMMAND may not exist?
     *
     * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
     * invalidated when MI_EXE_FLUSH is set.
     *
     * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
     * invalidated with every MI_FLUSH.
     *
     * TLBs:
     *
     * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
     * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
     * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
     * are flushed at any MI_FLUSH.
     */

    cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
    if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
        cmd &= ~MI_NO_WRITE_FLUSH;
    if (INTEL_INFO(dev)->gen < 4) {
        /*
         * On the 965, the sampler cache always gets flushed
         * and this bit is reserved.
         */
        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
            cmd |= MI_READ_FLUSH;
    }
    if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
        cmd |= MI_EXE_FLUSH;

    if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
        (IS_G4X(dev) || IS_GEN5(dev)))
        cmd |= MI_INVALIDATE_ISP;

    ret = intel_ring_begin(ring, 2);
    if (ret)
        return ret;

    intel_ring_emit(ring, cmd);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);

    return 0;
}

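/*
 * Example of how the flush word composes (hypothetical domain mask): a
 * pre-965 request invalidating the sampler and instruction caches, with
 * no render-cache flush requested, ends up as
 *     cmd = MI_FLUSH | MI_NO_WRITE_FLUSH | MI_READ_FLUSH | MI_EXE_FLUSH;
 * i.e. the read caches are invalidated while the write flush stays
 * suppressed because I915_GEM_DOMAIN_RENDER was absent.
 */
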
static void ring_write_tail(struct intel_ring_buffer *ring,
                            u32 value)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
                    RING_ACTHD(ring->mmio_base) : ACTHD;

    return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    struct drm_i915_gem_object *obj = ring->obj;
    u32 head;

    ENTER();

    /* Stop the ring if it's running. */
    I915_WRITE_CTL(ring, 0);
    I915_WRITE_HEAD(ring, 0);
    ring->write_tail(ring, 0);

    /* Initialize the ring. */
    I915_WRITE_START(ring, obj->gtt_offset);
    head = I915_READ_HEAD(ring) & HEAD_ADDR;

    /* G45 ring initialization fails to reset head to zero */
    if (head != 0) {
        DRM_DEBUG_KMS("%s head not reset to zero "
                      "ctl %08x head %08x tail %08x start %08x\n",
                      ring->name,
                      I915_READ_CTL(ring),
                      I915_READ_HEAD(ring),
                      I915_READ_TAIL(ring),
                      I915_READ_START(ring));

        I915_WRITE_HEAD(ring, 0);

        if (I915_READ_HEAD(ring) & HEAD_ADDR) {
            DRM_ERROR("failed to set %s head to zero "
                      "ctl %08x head %08x tail %08x start %08x\n",
                      ring->name,
                      I915_READ_CTL(ring),
                      I915_READ_HEAD(ring),
                      I915_READ_TAIL(ring),
                      I915_READ_START(ring));
        }
    }

    I915_WRITE_CTL(ring,
                   ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                   | RING_REPORT_64K | RING_VALID);

    /* If the head is still not zero, the ring is dead */
    if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
        I915_READ_START(ring) != obj->gtt_offset ||
        (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
        DRM_ERROR("%s initialization failed "
                  "ctl %08x head %08x tail %08x start %08x\n",
                  ring->name,
                  I915_READ_CTL(ring),
                  I915_READ_HEAD(ring),
                  I915_READ_TAIL(ring),
                  I915_READ_START(ring));
        return -EIO;
    }

    ring->head = I915_READ_HEAD(ring);
    ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
    ring->space = ring_space(ring);

    LEAVE();

    return 0;
}
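
/*
 * A note on the CTL programming above, as we read it: RING_NR_PAGES
 * masks (size - PAGE_SIZE) into the ring-length field, so e.g. a
 * 128 KiB ring is encoded as 31 extra pages beyond the implicit first
 * one.  The HEAD rewrite in the G45 branch exists because some parts
 * come out of reset with a stale head pointer.
 */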

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
    struct drm_i915_gem_object *obj;
    volatile u32 *cpu_page;
    u32 gtt_offset;
};

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
    struct pipe_control *pc;
    struct drm_i915_gem_object *obj;
    int ret;

    if (ring->private)
        return 0;

    pc = kmalloc(sizeof(*pc), GFP_KERNEL);
    if (!pc)
        return -ENOMEM;

    obj = i915_gem_alloc_object(ring->dev, 4096);
    if (obj == NULL) {
        DRM_ERROR("Failed to allocate seqno page\n");
        ret = -ENOMEM;
        goto err;
    }

//    i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

    ret = i915_gem_object_pin(obj, 4096, true);
    if (ret)
        goto err_unref;

    pc->gtt_offset = obj->gtt_offset;
    pc->cpu_page = (void *)MapIoMem(obj->pages[0], 4096, PG_SW);
    if (pc->cpu_page == NULL) {
        ret = -ENOMEM;  /* was falling through with ret == 0 on map failure */
        goto err_unpin;
    }

    pc->obj = obj;
    ring->private = pc;
    return 0;

err_unpin:
//    i915_gem_object_unpin(obj);
err_unref:
//    drm_gem_object_unreference(&obj->base);
err:
    kfree(pc);
    return ret;
}
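
/*
 * The 4 KiB object allocated above doubles as the PIPE_CONTROL write
 * target: dword 0 holds the most recent seqno (read back by
 * pc_render_get_seqno() below) and the 128-byte-spaced slots after it
 * absorb the workaround flushes issued by pc_render_add_request().
 * MapIoMem() stands in for kmap() in this port; the commented-out
 * unpin/unreference calls are Linux teardown paths not wired up here.
 */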

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
    struct pipe_control *pc = ring->private;
    struct drm_i915_gem_object *obj;

    if (!ring->private)
        return;

    obj = pc->obj;
//    kunmap(obj->pages[0]);
//    i915_gem_object_unpin(obj);
//    drm_gem_object_unreference(&obj->base);

    kfree(pc);
    ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

    ENTER();

    ret = init_ring_common(ring);

    if (INTEL_INFO(dev)->gen > 3) {
        int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
        if (IS_GEN6(dev) || IS_GEN7(dev))
            mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
        I915_WRITE(MI_MODE, mode);
        if (IS_GEN7(dev))
            I915_WRITE(GFX_MODE_GEN7,
                       GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                       GFX_MODE_ENABLE(GFX_REPLAY_MODE));
    }

    if (INTEL_INFO(dev)->gen >= 6) {
    } else if (IS_GEN5(dev)) {
        ret = init_pipe_control(ring);
        if (ret)
            return ret;
    }

    LEAVE();

    return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
    if (!ring->private)
        return;

    cleanup_pipe_control(ring);
}

static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
    struct drm_device *dev = ring->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int id;

    /*
     * cs -> 1 = vcs, 0 = bcs
     * vcs -> 1 = bcs, 0 = cs,
     * bcs -> 1 = cs, 0 = vcs.
     */
    id = ring - dev_priv->ring;
    id += 2 - i;
    id %= 3;

    intel_ring_emit(ring,
                    MI_SEMAPHORE_MBOX |
                    MI_SEMAPHORE_REGISTER |
                    MI_SEMAPHORE_UPDATE);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring,
                    RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}
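
/*
 * Checking the index math against the table above: for the render ring
 * (id 0) with i == 0, id becomes (0 + 2 - 0) % 3 == 2, the blitter, and
 * with i == 1 it becomes (0 + 2 - 1) % 3 == 1, the video ring -- which
 * matches "cs -> 1 = vcs, 0 = bcs".
 */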

static int
gen6_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
{
    u32 seqno;
    int ret;

    ret = intel_ring_begin(ring, 10);
    if (ret)
        return ret;

    seqno = i915_gem_get_seqno(ring->dev);
    update_semaphore(ring, 0, seqno);
    update_semaphore(ring, 1, seqno);

    intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
    intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, MI_USER_INTERRUPT);
    intel_ring_advance(ring);

    *result = seqno;
    return 0;
}

int
intel_ring_sync(struct intel_ring_buffer *ring,
                struct intel_ring_buffer *to,
                u32 seqno)
{
    int ret;

    ret = intel_ring_begin(ring, 4);
    if (ret)
        return ret;

    intel_ring_emit(ring,
                    MI_SEMAPHORE_MBOX |
                    MI_SEMAPHORE_REGISTER |
                    intel_ring_sync_index(ring, to) << 17 |
                    MI_SEMAPHORE_COMPARE);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, 0);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);

    return 0;
}
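
/*
 * As we understand the gen6 mailbox protocol: the COMPARE packet parks
 * this ring until the sync register that `to` updates via
 * MI_SEMAPHORE_UPDATE reaches the given seqno, so the wait is resolved
 * entirely on the GPU with no CPU round trip.
 */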

#define PIPE_CONTROL_FLUSH(ring__, addr__)                                \
do {                                                                      \
    intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
                    PIPE_CONTROL_DEPTH_STALL | 2);                        \
    intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);          \
    intel_ring_emit(ring__, 0);                                           \
    intel_ring_emit(ring__, 0);                                           \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
                      u32 *result)
{
    struct drm_device *dev = ring->dev;
    u32 seqno = i915_gem_get_seqno(dev);
    struct pipe_control *pc = ring->private;
    u32 scratch_addr = pc->gtt_offset + 128;
    int ret;

    /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
     * incoherent with writes to memory, i.e. completely fubar,
     * so we need to use PIPE_NOTIFY instead.
     *
     * However, we also need to work around the qword write
     * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
     * memory before requesting an interrupt.
     */
    ret = intel_ring_begin(ring, 32);
    if (ret)
        return ret;

    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                    PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
    intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, 0);
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128; /* write to separate cachelines */
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128;
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128;
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128;
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128;
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                    PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
                    PIPE_CONTROL_NOTIFY);
    intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, 0);
    intel_ring_advance(ring);

    *result = seqno;
    return 0;
}
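
/*
 * Dword budget check: two 4-dword PIPE_CONTROL packets plus six 4-dword
 * PIPE_CONTROL_FLUSH expansions come to 4 + 24 + 4 = 32, exactly the
 * space reserved by intel_ring_begin(ring, 32) above.
 */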

static int
render_ring_add_request(struct intel_ring_buffer *ring,
                        u32 *result)
{
    struct drm_device *dev = ring->dev;
    u32 seqno = i915_gem_get_seqno(dev);
    int ret;

    ret = intel_ring_begin(ring, 4);
    if (ret)
        return ret;

    intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
    intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, MI_USER_INTERRUPT);
    intel_ring_advance(ring);

    *result = seqno;
    return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
    return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
    struct pipe_control *pc = ring->private;
    return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    dev_priv->gt_irq_mask &= ~mask;
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
    POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    dev_priv->gt_irq_mask |= mask;
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
    POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    dev_priv->irq_mask &= ~mask;
    I915_WRITE(IMR, dev_priv->irq_mask);
    POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    dev_priv->irq_mask |= mask;
    I915_WRITE(IMR, dev_priv->irq_mask);
    POSTING_READ(IMR);
}

#if 0
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (!dev->irq_enabled)
        return false;

    spin_lock(&ring->irq_lock);
    if (ring->irq_refcount++ == 0) {
        if (HAS_PCH_SPLIT(dev))
            ironlake_enable_irq(dev_priv,
                                GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
        else
            i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
    }
    spin_unlock(&ring->irq_lock);

    return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    spin_lock(&ring->irq_lock);
    if (--ring->irq_refcount == 0) {
        if (HAS_PCH_SPLIT(dev))
            ironlake_disable_irq(dev_priv,
                                 GT_USER_INTERRUPT |
                                 GT_PIPE_NOTIFY);
        else
            i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
    }
    spin_unlock(&ring->irq_lock);
}
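
/*
 * The refcount above makes nested get/put pairs cheap: only the 0 -> 1
 * transition unmasks the interrupt and only the 1 -> 0 transition masks
 * it again, so concurrent waiters share one GTIMR/IMR update.
 */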

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    u32 mmio = 0;

    /* The ring status page addresses are no longer next to the rest of
     * the ring registers as of gen7.
     */
    if (IS_GEN7(dev)) {
        switch (ring->id) {
        case RING_RENDER:
            mmio = RENDER_HWS_PGA_GEN7;
            break;
        case RING_BLT:
            mmio = BLT_HWS_PGA_GEN7;
            break;
        case RING_BSD:
            mmio = BSD_HWS_PGA_GEN7;
            break;
        }
    } else if (IS_GEN6(ring->dev)) {
        mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
    } else {
        mmio = RING_HWS_PGA(ring->mmio_base);
    }

    I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
    POSTING_READ(mmio);
}
#endif

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
               u32 invalidate_domains,
               u32 flush_domains)
{
    int ret;

    ret = intel_ring_begin(ring, 2);
    if (ret)
        return ret;

    intel_ring_emit(ring, MI_FLUSH);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);
    return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
{
    u32 seqno;
    int ret;

    ret = intel_ring_begin(ring, 4);
    if (ret)
        return ret;

    seqno = i915_gem_get_seqno(ring->dev);

    intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
    intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, MI_USER_INTERRUPT);
    intel_ring_advance(ring);

    *result = seqno;
    return 0;
}

#if 0

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (!dev->irq_enabled)
        return false;

    spin_lock(&ring->irq_lock);
    if (ring->irq_refcount++ == 0) {
        ring->irq_mask &= ~rflag;
        I915_WRITE_IMR(ring, ring->irq_mask);
        ironlake_enable_irq(dev_priv, gflag);
    }
    spin_unlock(&ring->irq_lock);

    return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    spin_lock(&ring->irq_lock);
    if (--ring->irq_refcount == 0) {
        ring->irq_mask |= rflag;
        I915_WRITE_IMR(ring, ring->irq_mask);
        ironlake_disable_irq(dev_priv, gflag);
    }
    spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (!dev->irq_enabled)
        return false;

    spin_lock(&ring->irq_lock);
    if (ring->irq_refcount++ == 0) {
        if (IS_G4X(dev))
            i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
        else
            ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
    }
    spin_unlock(&ring->irq_lock);

    return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    spin_lock(&ring->irq_lock);
    if (--ring->irq_refcount == 0) {
        if (IS_G4X(dev))
            i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
        else
            ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
    }
    spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
    int ret;

    ret = intel_ring_begin(ring, 2);
    if (ret)
        return ret;

    intel_ring_emit(ring,
                    MI_BATCH_BUFFER_START | (2 << 6) |
                    MI_BATCH_NON_SECURE_I965);
    intel_ring_emit(ring, offset);
    intel_ring_advance(ring);

    return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len)
{
    struct drm_device *dev = ring->dev;
    int ret;

    if (IS_I830(dev) || IS_845G(dev)) {
        ret = intel_ring_begin(ring, 4);
        if (ret)
            return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER);
        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
        intel_ring_emit(ring, offset + len - 8);
        intel_ring_emit(ring, 0);
    } else {
        ret = intel_ring_begin(ring, 2);
        if (ret)
            return ret;

        if (INTEL_INFO(dev)->gen >= 4) {
            intel_ring_emit(ring,
                            MI_BATCH_BUFFER_START | (2 << 6) |
                            MI_BATCH_NON_SECURE_I965);
            intel_ring_emit(ring, offset);
        } else {
            intel_ring_emit(ring,
                            MI_BATCH_BUFFER_START | (2 << 6));
            intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
        }
    }
    intel_ring_advance(ring);

    return 0;
}
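
/*
 * Two dispatch flavours are visible above: i830/845 use the 4-dword
 * MI_BATCH_BUFFER packet with explicit start and end addresses
 * (offset + len - 8 points at the final qword), while newer parts use
 * the 2-dword MI_BATCH_BUFFER_START; on 965+ the non-secure flag rides
 * in the command word instead of being OR'ed into the address.
 */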

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    struct drm_i915_gem_object *obj;

    obj = ring->status_page.obj;
    if (obj == NULL)
        return;

    kunmap(obj->pages[0]);
//    i915_gem_object_unpin(obj);
//    drm_gem_object_unreference(&obj->base);
    ring->status_page.obj = NULL;

    memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj;
    int ret;

    obj = i915_gem_alloc_object(dev, 4096);
    if (obj == NULL) {
        DRM_ERROR("Failed to allocate status page\n");
        ret = -ENOMEM;
        goto err;
    }

//    i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

    ret = i915_gem_object_pin(obj, 4096, true);
    if (ret != 0) {
        goto err_unref;
    }

    ring->status_page.gfx_addr = obj->gtt_offset;
    ring->status_page.page_addr = kmap(obj->pages[0]);
    if (ring->status_page.page_addr == NULL) {
        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
        goto err_unpin;
    }
    ring->status_page.obj = obj;
    memset(ring->status_page.page_addr, 0, PAGE_SIZE);

    intel_ring_setup_status_page(ring);
    DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                     ring->name, ring->status_page.gfx_addr);

    return 0;

err_unpin:
    i915_gem_object_unpin(obj);
err_unref:
    drm_gem_object_unreference(&obj->base);
err:
    return ret;
}
#endif

int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
    struct drm_i915_gem_object *obj = NULL;
    int ret;

    ENTER();

    ring->dev = dev;
    INIT_LIST_HEAD(&ring->active_list);
    INIT_LIST_HEAD(&ring->request_list);
    INIT_LIST_HEAD(&ring->gpu_write_list);

//    init_waitqueue_head(&ring->irq_queue);
//    spin_lock_init(&ring->irq_lock);
    ring->irq_mask = ~0;

    if (I915_NEED_GFX_HWS(dev)) {
//        ret = init_status_page(ring);
//        if (ret)
//            return ret;
    }

    obj = i915_gem_alloc_object(dev, ring->size);
    if (obj == NULL) {
        DRM_ERROR("Failed to allocate ringbuffer\n");
        ret = -ENOMEM;
        goto err_hws;
    }

    ring->obj = obj;

    ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
    if (ret)
        goto err_unref;

    ring->map.size = ring->size;
    ring->map.offset = get_bus_addr() + obj->gtt_offset;
    ring->map.type = 0;
    ring->map.flags = 0;
    ring->map.mtrr = 0;

//    drm_core_ioremap_wc(&ring->map, dev);

    ring->map.handle = ioremap(ring->map.offset, ring->map.size);

    if (ring->map.handle == NULL) {
        DRM_ERROR("Failed to map ringbuffer.\n");
        ret = -EINVAL;
        goto err_unpin;
    }

    ring->virtual_start = ring->map.handle;
    ret = ring->init(ring);
    if (ret)
        goto err_unmap;

    /* Workaround an erratum on the i830 which causes a hang if
     * the TAIL pointer points to within the last 2 cachelines
     * of the buffer.
     */
    ring->effective_size = ring->size;
    if (IS_I830(ring->dev))
        ring->effective_size -= 128;

    LEAVE();
    return 0;

err_unmap:
//    drm_core_ioremapfree(&ring->map, dev);
    FreeKernelSpace(ring->virtual_start);
err_unpin:
//    i915_gem_object_unpin(obj);
err_unref:
//    drm_gem_object_unreference(&obj->base);
    ring->obj = NULL;
err_hws:
//    cleanup_status_page(ring);
    return ret;
}
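
/*
 * effective_size reserves those last 128 bytes on i830 so the TAIL
 * pointer never lands in the final two cachelines, the intent being
 * that free-space checks consult it rather than the raw allocation
 * size.
 */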

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
    struct drm_i915_private *dev_priv;
    int ret;

    if (ring->obj == NULL)
        return;

    /* Disable the ring buffer. The ring must be idle at this point */
    dev_priv = ring->dev->dev_private;
    ret = intel_wait_ring_idle(ring);
    if (ret)
        DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                  ring->name, ret);

    I915_WRITE_CTL(ring, 0);

//    drm_core_ioremapfree(&ring->map, ring->dev);

//    i915_gem_object_unpin(ring->obj);
//    drm_gem_object_unreference(&ring->obj->base);
    ring->obj = NULL;

    if (ring->cleanup)
        ring->cleanup(ring);

//    cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
    unsigned int *virt;
    int rem = ring->size - ring->tail;

    if (ring->space < rem) {
        int ret = intel_wait_ring_buffer(ring, rem);
        if (ret)
            return ret;
    }

    virt = (unsigned int *)(ring->virtual_start + ring->tail);
    rem /= 8;
    while (rem--) {
        *virt++ = MI_NOOP;
        *virt++ = MI_NOOP;
    }

    ring->tail = 0;
    ring->space = ring_space(ring);

    return 0;
}
964 | 957 | ||
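/*
 * Illustrative sketch of the space accounting these helpers rely on,
 * assuming the usual i915 convention (masking of the head register
 * aside) of keeping 8 bytes of slack so head == tail means "empty":
 *
 *	space = ring->head - (ring->tail + 8);
 *	if (space < 0)
 *		space += ring->size;
 *
 * intel_wrap_ring_buffer() pads the area between the current tail and
 * the end of the ring with MI_NOOPs, so no request ever straddles the
 * wrap point.
 */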
965 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) |
958 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) |
966 | { |
959 | { |
967 | struct drm_device *dev = ring->dev; |
960 | struct drm_device *dev = ring->dev; |
968 | struct drm_i915_private *dev_priv = dev->dev_private; |
961 | struct drm_i915_private *dev_priv = dev->dev_private; |
969 | unsigned long end; |
962 | unsigned long end; |
970 | u32 head; |
963 | u32 head; |
971 | 964 | ||
972 | /* If the reported head position has wrapped or hasn't advanced, |
965 | /* If the reported head position has wrapped or hasn't advanced, |
973 | * fall back to the slow and accurate path. |
966 | * fall back to the slow and accurate path. |
974 | */ |
967 | */ |
975 | head = intel_read_status_page(ring, 4); |
968 | head = intel_read_status_page(ring, 4); |
976 | if (head > ring->head) { |
969 | if (head > ring->head) { |
977 | ring->head = head; |
970 | ring->head = head; |
978 | ring->space = ring_space(ring); |
971 | ring->space = ring_space(ring); |
979 | if (ring->space >= n) |
972 | if (ring->space >= n) |
980 | return 0; |
973 | return 0; |
981 | } |
974 | } |
982 | 975 | ||
983 | // trace_i915_ring_wait_begin(ring); |
976 | // trace_i915_ring_wait_begin(ring); |
984 | end = jiffies + 3 * HZ; |
977 | end = jiffies + 3 * HZ; |
985 | do { |
978 | do { |
986 | ring->head = I915_READ_HEAD(ring); |
979 | ring->head = I915_READ_HEAD(ring); |
987 | ring->space = ring_space(ring); |
980 | ring->space = ring_space(ring); |
988 | if (ring->space >= n) { |
981 | if (ring->space >= n) { |
989 | // trace_i915_ring_wait_end(ring); |
982 | // trace_i915_ring_wait_end(ring); |
990 | return 0; |
983 | return 0; |
991 | } |
984 | } |
992 | 985 | ||
993 | if (dev->primary->master) { |
986 | if (dev->primary->master) { |
994 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
987 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
995 | if (master_priv->sarea_priv) |
988 | if (master_priv->sarea_priv) |
996 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
989 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
997 | } |
990 | } |
998 | 991 | ||
999 | msleep(1); |
992 | msleep(1); |
1000 | if (atomic_read(&dev_priv->mm.wedged)) |
993 | if (atomic_read(&dev_priv->mm.wedged)) |
1001 | return -EAGAIN; |
994 | return -EAGAIN; |
1002 | } while (!time_after(jiffies, end)); |
995 | } while (!time_after(jiffies, end)); |
1003 | // trace_i915_ring_wait_end(ring); |
996 | // trace_i915_ring_wait_end(ring); |
1004 | return -EBUSY; |
997 | return -EBUSY; |
1005 | } |
998 | } |
1006 | 999 | ||
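/*
 * Polling contract of intel_wait_ring_buffer(): the loop samples the
 * hardware head once per millisecond for up to three seconds. It
 * returns -EAGAIN as soon as the GPU is marked wedged, and -EBUSY only
 * after the full timeout expires without enough space appearing.
 */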
1007 | int intel_ring_begin(struct intel_ring_buffer *ring, |
1000 | int intel_ring_begin(struct intel_ring_buffer *ring, |
1008 | int num_dwords) |
1001 | int num_dwords) |
1009 | { |
1002 | { |
1010 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
1003 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
1011 | int n = 4*num_dwords; |
1004 | int n = 4*num_dwords; |
1012 | int ret; |
1005 | int ret; |
1013 | 1006 | ||
1014 | if (unlikely(atomic_read(&dev_priv->mm.wedged))) |
1007 | if (unlikely(atomic_read(&dev_priv->mm.wedged))) |
1015 | return -EIO; |
1008 | return -EIO; |
1016 | 1009 | ||
1017 | if (unlikely(ring->tail + n > ring->effective_size)) { |
1010 | if (unlikely(ring->tail + n > ring->effective_size)) { |
1018 | ret = intel_wrap_ring_buffer(ring); |
1011 | ret = intel_wrap_ring_buffer(ring); |
1019 | if (unlikely(ret)) |
1012 | if (unlikely(ret)) |
1020 | return ret; |
1013 | return ret; |
1021 | } |
1014 | } |
1022 | 1015 | ||
1023 | if (unlikely(ring->space < n)) { |
1016 | if (unlikely(ring->space < n)) { |
1024 | ret = intel_wait_ring_buffer(ring, n); |
1017 | ret = intel_wait_ring_buffer(ring, n); |
1025 | if (unlikely(ret)) |
1018 | if (unlikely(ret)) |
1026 | return ret; |
1019 | return ret; |
1027 | } |
1020 | } |
1028 | 1021 | ||
1029 | ring->space -= n; |
1022 | ring->space -= n; |
1030 | return 0; |
1023 | return 0; |
1031 | } |
1024 | } |
1032 | 1025 | ||
1033 | void intel_ring_advance(struct intel_ring_buffer *ring) |
1026 | void intel_ring_advance(struct intel_ring_buffer *ring) |
1034 | { |
1027 | { |
1035 | ring->tail &= ring->size - 1; |
1028 | ring->tail &= ring->size - 1; |
1036 | ring->write_tail(ring, ring->tail); |
1029 | ring->write_tail(ring, ring->tail); |
1037 | } |
1030 | } |
1038 | - | ||
1039 | 1031 | ||
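/*
 * Minimal usage sketch for the three helpers above, assuming
 * intel_ring_emit() writes one dword at the current tail as it does
 * elsewhere in this driver:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * intel_ring_begin() reserves the space (4 bytes per dword), the emit
 * calls fill it, and intel_ring_advance() publishes the new tail to
 * the hardware through ring->write_tail().
 */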
1040 | static const struct intel_ring_buffer render_ring = { |
1032 | static const struct intel_ring_buffer render_ring = { |
1041 | .name = "render ring", |
1033 | .name = "render ring", |
1042 | .id = RING_RENDER, |
1034 | .id = RING_RENDER, |
1043 | .mmio_base = RENDER_RING_BASE, |
1035 | .mmio_base = RENDER_RING_BASE, |
1044 | .size = 32 * PAGE_SIZE, |
1036 | .size = 32 * PAGE_SIZE, |
1045 | .init = init_render_ring, |
1037 | .init = init_render_ring, |
1046 | .write_tail = ring_write_tail, |
1038 | .write_tail = ring_write_tail, |
1047 | .flush = render_ring_flush, |
1039 | .flush = render_ring_flush, |
1048 | // .add_request = render_ring_add_request, |
1040 | .add_request = render_ring_add_request, |
1049 | // .get_seqno = ring_get_seqno, |
1041 | // .get_seqno = ring_get_seqno, |
1050 | // .irq_get = render_ring_get_irq, |
1042 | // .irq_get = render_ring_get_irq, |
1051 | // .irq_put = render_ring_put_irq, |
1043 | // .irq_put = render_ring_put_irq, |
1052 | // .dispatch_execbuffer = render_ring_dispatch_execbuffer, |
1044 | // .dispatch_execbuffer = render_ring_dispatch_execbuffer, |
1053 | // .cleanup = render_ring_cleanup, |
1045 | // .cleanup = render_ring_cleanup, |
1054 | }; |
1046 | }; |
1055 | 1047 | ||
1056 | /* ring buffer for bit-stream decoder */ |
1048 | /* ring buffer for bit-stream decoder */ |
1057 | 1049 | ||
1058 | static const struct intel_ring_buffer bsd_ring = { |
1050 | static const struct intel_ring_buffer bsd_ring = { |
1059 | .name = "bsd ring", |
1051 | .name = "bsd ring", |
1060 | .id = RING_BSD, |
1052 | .id = RING_BSD, |
1061 | .mmio_base = BSD_RING_BASE, |
1053 | .mmio_base = BSD_RING_BASE, |
1062 | .size = 32 * PAGE_SIZE, |
1054 | .size = 32 * PAGE_SIZE, |
1063 | .init = init_ring_common, |
1055 | .init = init_ring_common, |
1064 | .write_tail = ring_write_tail, |
1056 | .write_tail = ring_write_tail, |
1065 | .flush = bsd_ring_flush, |
1057 | .flush = bsd_ring_flush, |
1066 | // .add_request = ring_add_request, |
1058 | .add_request = ring_add_request, |
1067 | // .get_seqno = ring_get_seqno, |
1059 | // .get_seqno = ring_get_seqno, |
1068 | // .irq_get = bsd_ring_get_irq, |
1060 | // .irq_get = bsd_ring_get_irq, |
1069 | // .irq_put = bsd_ring_put_irq, |
1061 | // .irq_put = bsd_ring_put_irq, |
1070 | // .dispatch_execbuffer = ring_dispatch_execbuffer, |
1062 | // .dispatch_execbuffer = ring_dispatch_execbuffer, |
1071 | }; |
1063 | }; |
1072 | 1064 | ||
1073 | 1065 | ||
1074 | static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, |
1066 | static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, |
1075 | u32 value) |
1067 | u32 value) |
1076 | { |
1068 | { |
1077 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1069 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1078 | 1070 | ||
1079 | /* Every tail move must follow the sequence below */ |
1071 | /* Every tail move must follow the sequence below */ |
1080 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, |
1072 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, |
1081 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | |
1073 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | |
1082 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); |
1074 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); |
1083 | I915_WRITE(GEN6_BSD_RNCID, 0x0); |
1075 | I915_WRITE(GEN6_BSD_RNCID, 0x0); |
1084 | 1076 | ||
1085 | if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & |
1077 | if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & |
1086 | GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, |
1078 | GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, |
1087 | 50)) |
1079 | 50)) |
1088 | DRM_ERROR("timed out waiting for IDLE Indicator\n"); |
1080 | DRM_ERROR("timed out waiting for IDLE Indicator\n"); |
1089 | 1081 | ||
1090 | I915_WRITE_TAIL(ring, value); |
1082 | I915_WRITE_TAIL(ring, value); |
1091 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, |
1083 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, |
1092 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | |
1084 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | |
1093 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); |
1085 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); |
1094 | } |
1086 | } |
1095 | - | ||
1096 | 1087 | ||
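/*
 * Rationale for the sequence above (as suggested by the code, not
 * stated in either revision): the gen6 BSD ring can power down between
 * commands, so the RC idle messaging is disabled and the idle
 * indicator polled before the tail write, then re-enabled afterwards,
 * apparently to keep the ring awake long enough to observe the new
 * tail.
 */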
1097 | static int gen6_ring_flush(struct intel_ring_buffer *ring, |
1088 | static int gen6_ring_flush(struct intel_ring_buffer *ring, |
1098 | u32 invalidate, u32 flush) |
1089 | u32 invalidate, u32 flush) |
1099 | { |
1090 | { |
1100 | uint32_t cmd; |
1091 | uint32_t cmd; |
1101 | int ret; |
1092 | int ret; |
1102 | 1093 | ||
1103 | ret = intel_ring_begin(ring, 4); |
1094 | ret = intel_ring_begin(ring, 4); |
1104 | if (ret) |
1095 | if (ret) |
1105 | return ret; |
1096 | return ret; |
1106 | 1097 | ||
1107 | cmd = MI_FLUSH_DW; |
1098 | cmd = MI_FLUSH_DW; |
1108 | if (invalidate & I915_GEM_GPU_DOMAINS) |
1099 | if (invalidate & I915_GEM_GPU_DOMAINS) |
1109 | cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; |
1100 | cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; |
1110 | intel_ring_emit(ring, cmd); |
1101 | intel_ring_emit(ring, cmd); |
1111 | intel_ring_emit(ring, 0); |
1102 | intel_ring_emit(ring, 0); |
1112 | intel_ring_emit(ring, 0); |
1103 | intel_ring_emit(ring, 0); |
1113 | intel_ring_emit(ring, MI_NOOP); |
1104 | intel_ring_emit(ring, MI_NOOP); |
1114 | intel_ring_advance(ring); |
1105 | intel_ring_advance(ring); |
1115 | return 0; |
1106 | return 0; |
1116 | } |
1107 | } |
1117 | 1108 | ||
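/*
 * Packet layout emitted by gen6_ring_flush(), padded to four dwords:
 * the MI_FLUSH_DW command itself, two dwords left zero (the unused
 * post-sync address and data fields), and an MI_NOOP to keep the tail
 * qword-aligned. The TLB/BSD invalidate bits are set only when a GPU
 * domain asked to be invalidated.
 */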
1118 | #if 0 |
1109 | #if 0 |
1119 | static int |
1110 | static int |
1120 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
1111 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
1121 | u32 offset, u32 len) |
1112 | u32 offset, u32 len) |
1122 | { |
1113 | { |
1123 | int ret; |
1114 | int ret; |
1124 | 1115 | ||
1125 | ret = intel_ring_begin(ring, 2); |
1116 | ret = intel_ring_begin(ring, 2); |
1126 | if (ret) |
1117 | if (ret) |
1127 | return ret; |
1118 | return ret; |
1128 | 1119 | ||
1129 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); |
1120 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); |
1130 | /* bits 0-7 are the length on GEN6+ */ |
1121 | /* bits 0-7 are the length on GEN6+ */ |
1131 | intel_ring_emit(ring, offset); |
1122 | intel_ring_emit(ring, offset); |
1132 | intel_ring_advance(ring); |
1123 | intel_ring_advance(ring); |
1133 | 1124 | ||
1134 | return 0; |
1125 | return 0; |
1135 | } |
1126 | } |
1136 | 1127 | ||
1137 | static bool |
1128 | static bool |
1138 | gen6_render_ring_get_irq(struct intel_ring_buffer *ring) |
1129 | gen6_render_ring_get_irq(struct intel_ring_buffer *ring) |
1139 | { |
1130 | { |
1140 | return gen6_ring_get_irq(ring, |
1131 | return gen6_ring_get_irq(ring, |
1141 | GT_USER_INTERRUPT, |
1132 | GT_USER_INTERRUPT, |
1142 | GEN6_RENDER_USER_INTERRUPT); |
1133 | GEN6_RENDER_USER_INTERRUPT); |
1143 | } |
1134 | } |
1144 | 1135 | ||
1145 | static void |
1136 | static void |
1146 | gen6_render_ring_put_irq(struct intel_ring_buffer *ring) |
1137 | gen6_render_ring_put_irq(struct intel_ring_buffer *ring) |
1147 | { |
1138 | { |
1148 | return gen6_ring_put_irq(ring, |
1139 | return gen6_ring_put_irq(ring, |
1149 | GT_USER_INTERRUPT, |
1140 | GT_USER_INTERRUPT, |
1150 | GEN6_RENDER_USER_INTERRUPT); |
1141 | GEN6_RENDER_USER_INTERRUPT); |
1151 | } |
1142 | } |
1152 | 1143 | ||
1153 | static bool |
1144 | static bool |
1154 | gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) |
1145 | gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) |
1155 | { |
1146 | { |
1156 | return gen6_ring_get_irq(ring, |
1147 | return gen6_ring_get_irq(ring, |
1157 | GT_GEN6_BSD_USER_INTERRUPT, |
1148 | GT_GEN6_BSD_USER_INTERRUPT, |
1158 | GEN6_BSD_USER_INTERRUPT); |
1149 | GEN6_BSD_USER_INTERRUPT); |
1159 | } |
1150 | } |
1160 | 1151 | ||
1161 | static void |
1152 | static void |
1162 | gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) |
1153 | gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) |
1163 | { |
1154 | { |
1164 | return gen6_ring_put_irq(ring, |
1155 | return gen6_ring_put_irq(ring, |
1165 | GT_GEN6_BSD_USER_INTERRUPT, |
1156 | GT_GEN6_BSD_USER_INTERRUPT, |
1166 | GEN6_BSD_USER_INTERRUPT); |
1157 | GEN6_BSD_USER_INTERRUPT); |
1167 | } |
1158 | } |
1168 | 1159 | ||
1169 | #endif |
1160 | #endif |
1170 | 1161 | ||
1171 | /* ring buffer for Video Codec for Gen6+ */ |
1162 | /* ring buffer for Video Codec for Gen6+ */ |
1172 | static const struct intel_ring_buffer gen6_bsd_ring = { |
1163 | static const struct intel_ring_buffer gen6_bsd_ring = { |
1173 | .name = "gen6 bsd ring", |
1164 | .name = "gen6 bsd ring", |
1174 | .id = RING_BSD, |
1165 | .id = RING_BSD, |
1175 | .mmio_base = GEN6_BSD_RING_BASE, |
1166 | .mmio_base = GEN6_BSD_RING_BASE, |
1176 | .size = 32 * PAGE_SIZE, |
1167 | .size = 32 * PAGE_SIZE, |
1177 | .init = init_ring_common, |
1168 | .init = init_ring_common, |
1178 | .write_tail = gen6_bsd_ring_write_tail, |
1169 | .write_tail = gen6_bsd_ring_write_tail, |
1179 | .flush = gen6_ring_flush, |
1170 | .flush = gen6_ring_flush, |
1180 | // .add_request = gen6_add_request, |
1171 | .add_request = gen6_add_request, |
1181 | // .get_seqno = ring_get_seqno, |
1172 | // .get_seqno = ring_get_seqno, |
1182 | // .irq_get = gen6_bsd_ring_get_irq, |
1173 | // .irq_get = gen6_bsd_ring_get_irq, |
1183 | // .irq_put = gen6_bsd_ring_put_irq, |
1174 | // .irq_put = gen6_bsd_ring_put_irq, |
1184 | // .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
1175 | // .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
1185 | }; |
1176 | }; |
1186 | 1177 | ||
1187 | #if 0 |
1178 | #if 0 |
1188 | /* Blitter support (SandyBridge+) */ |
1179 | /* Blitter support (SandyBridge+) */ |
1189 | 1180 | ||
1190 | static bool |
1181 | static bool |
1191 | blt_ring_get_irq(struct intel_ring_buffer *ring) |
1182 | blt_ring_get_irq(struct intel_ring_buffer *ring) |
1192 | { |
1183 | { |
1193 | return gen6_ring_get_irq(ring, |
1184 | return gen6_ring_get_irq(ring, |
1194 | GT_BLT_USER_INTERRUPT, |
1185 | GT_BLT_USER_INTERRUPT, |
1195 | GEN6_BLITTER_USER_INTERRUPT); |
1186 | GEN6_BLITTER_USER_INTERRUPT); |
1196 | } |
1187 | } |
1197 | 1188 | ||
1198 | static void |
1189 | static void |
1199 | blt_ring_put_irq(struct intel_ring_buffer *ring) |
1190 | blt_ring_put_irq(struct intel_ring_buffer *ring) |
1200 | { |
1191 | { |
1201 | gen6_ring_put_irq(ring, |
1192 | gen6_ring_put_irq(ring, |
1202 | GT_BLT_USER_INTERRUPT, |
1193 | GT_BLT_USER_INTERRUPT, |
1203 | GEN6_BLITTER_USER_INTERRUPT); |
1194 | GEN6_BLITTER_USER_INTERRUPT); |
1204 | } |
1195 | } |
1205 | #endif |
1196 | #endif |
1206 | 1197 | ||
1207 | 1198 | ||
1208 | /* Workaround for some steppings of SNB: |
1199 | /* Workaround for some steppings of SNB: |
1209 | * each time the BLT engine ring tail is moved, |
1200 | * each time the BLT engine ring tail is moved, |
1210 | * the first command in the ring to be parsed |
1201 | * the first command in the ring to be parsed |
1211 | * must be MI_BATCH_BUFFER_START. |
1202 | * must be MI_BATCH_BUFFER_START. |
1212 | */ |
1203 | */ |
1213 | #define NEED_BLT_WORKAROUND(dev) \ |
1204 | #define NEED_BLT_WORKAROUND(dev) \ |
1214 | (IS_GEN6(dev) && (dev->pdev->revision < 8)) |
1205 | (IS_GEN6(dev) && (dev->pdev->revision < 8)) |
1215 | 1206 | ||
1216 | static inline struct drm_i915_gem_object * |
1207 | static inline struct drm_i915_gem_object * |
1217 | to_blt_workaround(struct intel_ring_buffer *ring) |
1208 | to_blt_workaround(struct intel_ring_buffer *ring) |
1218 | { |
1209 | { |
1219 | return ring->private; |
1210 | return ring->private; |
1220 | } |
1211 | } |
1221 | - | ||
1222 | 1212 | ||
1223 | static int blt_ring_init(struct intel_ring_buffer *ring) |
1213 | static int blt_ring_init(struct intel_ring_buffer *ring) |
1224 | { |
1214 | { |
1225 | if (NEED_BLT_WORKAROUND(ring->dev)) { |
1215 | if (NEED_BLT_WORKAROUND(ring->dev)) { |
1226 | struct drm_i915_gem_object *obj; |
1216 | struct drm_i915_gem_object *obj; |
1227 | u32 *ptr; |
1217 | u32 *ptr; |
1228 | int ret; |
1218 | int ret; |
1229 | 1219 | ||
1230 | obj = i915_gem_alloc_object(ring->dev, 4096); |
1220 | obj = i915_gem_alloc_object(ring->dev, 4096); |
1231 | if (obj == NULL) |
1221 | if (obj == NULL) |
1232 | return -ENOMEM; |
1222 | return -ENOMEM; |
1233 | 1223 | ||
1234 | ret = i915_gem_object_pin(obj, 4096, true); |
1224 | ret = i915_gem_object_pin(obj, 4096, true); |
1235 | if (ret) { |
1225 | if (ret) { |
1236 | // drm_gem_object_unreference(&obj->base); |
1226 | // drm_gem_object_unreference(&obj->base); |
1237 | return ret; |
1227 | return ret; |
1238 | } |
1228 | } |
1239 | 1229 | ||
1240 | ptr = ioremap(obj->pages[0], 4096); |
1230 | ptr = ioremap(obj->pages[0], 4096); |
1241 | *ptr++ = MI_BATCH_BUFFER_END; |
1231 | *ptr++ = MI_BATCH_BUFFER_END; |
1242 | *ptr++ = MI_NOOP; |
1232 | *ptr++ = MI_NOOP; |
1243 | // iounmap(obj->pages[0]); |
1233 | // iounmap(obj->pages[0]); |
1244 | 1234 | ||
1245 | ret = i915_gem_object_set_to_gtt_domain(obj, false); |
1235 | ret = i915_gem_object_set_to_gtt_domain(obj, false); |
1246 | if (ret) { |
1236 | if (ret) { |
1247 | // i915_gem_object_unpin(obj); |
1237 | // i915_gem_object_unpin(obj); |
1248 | // drm_gem_object_unreference(&obj->base); |
1238 | // drm_gem_object_unreference(&obj->base); |
1249 | return ret; |
1239 | return ret; |
1250 | } |
1240 | } |
1251 | 1241 | ||
1252 | ring->private = obj; |
1242 | ring->private = obj; |
1253 | } |
1243 | } |
1254 | 1244 | ||
1255 | return init_ring_common(ring); |
1245 | return init_ring_common(ring); |
1256 | } |
1246 | } |
1257 | 1247 | ||
1258 | static int blt_ring_begin(struct intel_ring_buffer *ring, |
1248 | static int blt_ring_begin(struct intel_ring_buffer *ring, |
1259 | int num_dwords) |
1249 | int num_dwords) |
1260 | { |
1250 | { |
1261 | if (ring->private) { |
1251 | if (ring->private) { |
1262 | int ret = intel_ring_begin(ring, num_dwords+2); |
1252 | int ret = intel_ring_begin(ring, num_dwords+2); |
1263 | if (ret) |
1253 | if (ret) |
1264 | return ret; |
1254 | return ret; |
1265 | 1255 | ||
1266 | intel_ring_emit(ring, MI_BATCH_BUFFER_START); |
1256 | intel_ring_emit(ring, MI_BATCH_BUFFER_START); |
1267 | intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset); |
1257 | intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset); |
1268 | 1258 | ||
1269 | return 0; |
1259 | return 0; |
1270 | } else |
1260 | } else |
1271 | return intel_ring_begin(ring, 4); |
1261 | return intel_ring_begin(ring, 4); |
1272 | } |
1262 | } |
1273 | 1263 | ||
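/*
 * With the workaround object in place, the command stream produced by
 * blt_ring_begin() for an N-dword request looks like:
 *
 *	MI_BATCH_BUFFER_START
 *	<gtt_offset of the workaround page>   (the page holds
 *	                                       MI_BATCH_BUFFER_END)
 *	<the caller's N dwords>
 *
 * which satisfies the SNB requirement that MI_BATCH_BUFFER_START be
 * the first command parsed after a tail move.
 */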
1274 | static int blt_ring_flush(struct intel_ring_buffer *ring, |
1264 | static int blt_ring_flush(struct intel_ring_buffer *ring, |
1275 | u32 invalidate, u32 flush) |
1265 | u32 invalidate, u32 flush) |
1276 | { |
1266 | { |
1277 | uint32_t cmd; |
1267 | uint32_t cmd; |
1278 | int ret; |
1268 | int ret; |
1279 | 1269 | ||
1280 | ret = blt_ring_begin(ring, 4); |
1270 | ret = blt_ring_begin(ring, 4); |
1281 | if (ret) |
1271 | if (ret) |
1282 | return ret; |
1272 | return ret; |
1283 | 1273 | ||
1284 | cmd = MI_FLUSH_DW; |
1274 | cmd = MI_FLUSH_DW; |
1285 | if (invalidate & I915_GEM_DOMAIN_RENDER) |
1275 | if (invalidate & I915_GEM_DOMAIN_RENDER) |
1286 | cmd |= MI_INVALIDATE_TLB; |
1276 | cmd |= MI_INVALIDATE_TLB; |
1287 | intel_ring_emit(ring, cmd); |
1277 | intel_ring_emit(ring, cmd); |
1288 | intel_ring_emit(ring, 0); |
1278 | intel_ring_emit(ring, 0); |
1289 | intel_ring_emit(ring, 0); |
1279 | intel_ring_emit(ring, 0); |
1290 | intel_ring_emit(ring, MI_NOOP); |
1280 | intel_ring_emit(ring, MI_NOOP); |
1291 | intel_ring_advance(ring); |
1281 | intel_ring_advance(ring); |
1292 | return 0; |
1282 | return 0; |
1293 | } |
1283 | } |
1294 | 1284 | ||
1295 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) |
1285 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) |
1296 | { |
1286 | { |
1297 | if (!ring->private) |
1287 | if (!ring->private) |
1298 | return; |
1288 | return; |
1299 | 1289 | ||
1300 | i915_gem_object_unpin(ring->private); |
1290 | i915_gem_object_unpin(ring->private); |
1301 | drm_gem_object_unreference(ring->private); |
1291 | drm_gem_object_unreference(ring->private); |
1302 | ring->private = NULL; |
1292 | ring->private = NULL; |
1303 | } |
1293 | } |
1304 | - | ||
1305 | 1294 | ||
1306 | static const struct intel_ring_buffer gen6_blt_ring = { |
1295 | static const struct intel_ring_buffer gen6_blt_ring = { |
1307 | .name = "blt ring", |
1296 | .name = "blt ring", |
1308 | .id = RING_BLT, |
1297 | .id = RING_BLT, |
1309 | .mmio_base = BLT_RING_BASE, |
1298 | .mmio_base = BLT_RING_BASE, |
1310 | .size = 32 * PAGE_SIZE, |
1299 | .size = 32 * PAGE_SIZE, |
1311 | .init = blt_ring_init, |
1300 | .init = blt_ring_init, |
1312 | .write_tail = ring_write_tail, |
1301 | .write_tail = ring_write_tail, |
1313 | .flush = blt_ring_flush, |
1302 | .flush = blt_ring_flush, |
1314 | // .add_request = gen6_add_request, |
1303 | .add_request = gen6_add_request, |
1315 | // .get_seqno = ring_get_seqno, |
1304 | // .get_seqno = ring_get_seqno, |
1316 | // .irq_get = blt_ring_get_irq, |
1305 | // .irq_get = blt_ring_get_irq, |
1317 | // .irq_put = blt_ring_put_irq, |
1306 | // .irq_put = blt_ring_put_irq, |
1318 | // .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
1307 | // .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
1319 | // .cleanup = blt_ring_cleanup, |
1308 | // .cleanup = blt_ring_cleanup, |
1320 | }; |
1309 | }; |
1321 | - | ||
1322 | - | ||
1323 | 1310 | ||
1324 | int intel_init_render_ring_buffer(struct drm_device *dev) |
1311 | int intel_init_render_ring_buffer(struct drm_device *dev) |
1325 | { |
1312 | { |
1326 | drm_i915_private_t *dev_priv = dev->dev_private; |
1313 | drm_i915_private_t *dev_priv = dev->dev_private; |
1327 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; |
1314 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; |
1328 | ENTER(); |
1315 | ENTER(); |
1329 | *ring = render_ring; |
1316 | *ring = render_ring; |
1330 | if (INTEL_INFO(dev)->gen >= 6) { |
1317 | if (INTEL_INFO(dev)->gen >= 6) { |
1331 | // ring->add_request = gen6_add_request; |
1318 | ring->add_request = gen6_add_request; |
1332 | // ring->irq_get = gen6_render_ring_get_irq; |
1319 | // ring->irq_get = gen6_render_ring_get_irq; |
1333 | // ring->irq_put = gen6_render_ring_put_irq; |
1320 | // ring->irq_put = gen6_render_ring_put_irq; |
1334 | } else if (IS_GEN5(dev)) { |
1321 | } else if (IS_GEN5(dev)) { |
1335 | // ring->add_request = pc_render_add_request; |
1322 | ring->add_request = pc_render_add_request; |
1336 | // ring->get_seqno = pc_render_get_seqno; |
1323 | // ring->get_seqno = pc_render_get_seqno; |
1337 | } |
1324 | } |
1338 | 1325 | ||
1339 | if (!I915_NEED_GFX_HWS(dev)) { |
1326 | if (!I915_NEED_GFX_HWS(dev)) { |
1340 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; |
1327 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; |
1341 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
1328 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
1342 | } |
1329 | } |
1343 | LEAVE(); |
1330 | LEAVE(); |
1344 | return intel_init_ring_buffer(dev, ring); |
1331 | return intel_init_ring_buffer(dev, ring); |
1345 | } |
1332 | } |
1346 | 1333 | ||
1347 | 1334 | ||
1348 | int intel_init_bsd_ring_buffer(struct drm_device *dev) |
1335 | int intel_init_bsd_ring_buffer(struct drm_device *dev) |
1349 | { |
1336 | { |
1350 | drm_i915_private_t *dev_priv = dev->dev_private; |
1337 | drm_i915_private_t *dev_priv = dev->dev_private; |
1351 | struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; |
1338 | struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; |
1352 | 1339 | ||
1353 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
1340 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
1354 | *ring = gen6_bsd_ring; |
1341 | *ring = gen6_bsd_ring; |
1355 | else |
1342 | else |
1356 | *ring = bsd_ring; |
1343 | *ring = bsd_ring; |
1357 | 1344 | ||
1358 | return intel_init_ring_buffer(dev, ring); |
1345 | return intel_init_ring_buffer(dev, ring); |
1359 | } |
1346 | } |
1360 | 1347 | ||
1361 | int intel_init_blt_ring_buffer(struct drm_device *dev) |
1348 | int intel_init_blt_ring_buffer(struct drm_device *dev) |
1362 | { |
1349 | { |
1363 | drm_i915_private_t *dev_priv = dev->dev_private; |
1350 | drm_i915_private_t *dev_priv = dev->dev_private; |
1364 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; |
1351 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; |
1365 | 1352 | ||
1366 | *ring = gen6_blt_ring; |
1353 | *ring = gen6_blt_ring; |
1367 | 1354 | ||
1368 | return intel_init_ring_buffer(dev, ring); |
1355 | return intel_init_ring_buffer(dev, ring); |
1369 | } |
1356 | } |