/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Zou Nan hai
 *    Xiang Hai hao
 *
 */
#define iowrite32(v, addr)      writel((v), (addr))
#define ioread32(addr)          readl(addr)

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
//#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
    struct drm_i915_gem_object *obj;
    volatile u32 *cpu_page;
    u32 gtt_offset;
};

static inline int ring_space(struct intel_ring_buffer *ring)
{
    int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
    if (space < 0)
        space += ring->size;
    return space;
}
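/*
 * Worked example for ring_space() above (illustrative numbers only): with a
 * 32 KiB ring, head == 0x2000 and tail == 0x3000, space is
 * 0x2000 - (0x3000 + 8) = -0x1008, which wraps to 0x6FF8 free bytes.  The
 * extra 8 bytes keep tail from ever catching up with head exactly, so a
 * completely full ring is not mistaken for an empty one.
 */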

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u32 seqno;

    seqno = dev_priv->next_seqno;

    /* reserve 0 for non-seqno */
    if (++dev_priv->next_seqno == 0)
        dev_priv->next_seqno = 1;

    return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
                  u32 invalidate_domains,
                  u32 flush_domains)
{
    struct drm_device *dev = ring->dev;
    u32 cmd;
    int ret;

    /*
     * read/write caches:
     *
     * I915_GEM_DOMAIN_RENDER is always invalidated, but is
     * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
     * also flushed at 2d versus 3d pipeline switches.
     *
     * read-only caches:
     *
     * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
     * MI_READ_FLUSH is set, and is always flushed on 965.
     *
     * I915_GEM_DOMAIN_COMMAND may not exist?
     *
     * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
     * invalidated when MI_EXE_FLUSH is set.
     *
     * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
     * invalidated with every MI_FLUSH.
     *
     * TLBs:
     *
     * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
     * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
     * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
     * are flushed at any MI_FLUSH.
     */

    cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
    if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
        cmd &= ~MI_NO_WRITE_FLUSH;
    if (INTEL_INFO(dev)->gen < 4) {
        /*
         * On the 965, the sampler cache always gets flushed
         * and this bit is reserved.
         */
        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
            cmd |= MI_READ_FLUSH;
    }
    if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
        cmd |= MI_EXE_FLUSH;

    if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
        (IS_G4X(dev) || IS_GEN5(dev)))
        cmd |= MI_INVALIDATE_ISP;

    ret = intel_ring_begin(ring, 2);
    if (ret)
        return ret;

    intel_ring_emit(ring, cmd);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);

    return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
    struct pipe_control *pc = ring->private;
    u32 scratch_addr = pc->gtt_offset + 128;
    int ret;

    ret = intel_ring_begin(ring, 6);
    if (ret)
        return ret;

    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
    intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                          PIPE_CONTROL_STALL_AT_SCOREBOARD);
    intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
    intel_ring_emit(ring, 0); /* low dword */
    intel_ring_emit(ring, 0); /* high dword */
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);

    ret = intel_ring_begin(ring, 6);
    if (ret)
        return ret;

    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
    intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
    intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
    intel_ring_emit(ring, 0);
    intel_ring_emit(ring, 0);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);

    return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
    u32 flags = 0;
    struct pipe_control *pc = ring->private;
    u32 scratch_addr = pc->gtt_offset + 128;
    int ret;

    /* Force SNB workarounds for PIPE_CONTROL flushes */
    intel_emit_post_sync_nonzero_flush(ring);

    /* Just flush everything.  Experiments have shown that reducing the
     * number of bits based on the write domains has little performance
     * impact.
     */
    flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
    flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
    flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
    flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
    flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
    flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
    flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

    ret = intel_ring_begin(ring, 6);
    if (ret)
        return ret;

    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
    intel_ring_emit(ring, flags);
    intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
    intel_ring_emit(ring, 0); /* lower dword */
    intel_ring_emit(ring, 0); /* upper dword */
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);

    return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
                            u32 value)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
                    RING_ACTHD(ring->mmio_base) : ACTHD;

    return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    struct drm_i915_gem_object *obj = ring->obj;
    u32 head;

    /* Stop the ring if it's running. */
    I915_WRITE_CTL(ring, 0);
    I915_WRITE_HEAD(ring, 0);
    ring->write_tail(ring, 0);

    /* Initialize the ring. */
    I915_WRITE_START(ring, obj->gtt_offset);
    head = I915_READ_HEAD(ring) & HEAD_ADDR;

    /* G45 ring initialization fails to reset head to zero */
    if (head != 0) {
        DRM_DEBUG_KMS("%s head not reset to zero "
                      "ctl %08x head %08x tail %08x start %08x\n",
                      ring->name,
                      I915_READ_CTL(ring),
                      I915_READ_HEAD(ring),
                      I915_READ_TAIL(ring),
                      I915_READ_START(ring));

        I915_WRITE_HEAD(ring, 0);

        if (I915_READ_HEAD(ring) & HEAD_ADDR) {
            DRM_ERROR("failed to set %s head to zero "
                      "ctl %08x head %08x tail %08x start %08x\n",
                      ring->name,
                      I915_READ_CTL(ring),
                      I915_READ_HEAD(ring),
                      I915_READ_TAIL(ring),
                      I915_READ_START(ring));
        }
    }

    I915_WRITE_CTL(ring,
                   ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                   | RING_REPORT_64K | RING_VALID);

    /* If the head is still not zero, the ring is dead */
    if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
        I915_READ_START(ring) != obj->gtt_offset ||
        (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
        DRM_ERROR("%s initialization failed "
                  "ctl %08x head %08x tail %08x start %08x\n",
                  ring->name,
                  I915_READ_CTL(ring),
                  I915_READ_HEAD(ring),
                  I915_READ_TAIL(ring),
                  I915_READ_START(ring));
        return -EIO;
    }

    ring->head = I915_READ_HEAD(ring);
    ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
    ring->space = ring_space(ring);

    return 0;
}

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
    struct pipe_control *pc;
    struct drm_i915_gem_object *obj;
    int ret;

    if (ring->private)
        return 0;

    pc = kmalloc(sizeof(*pc), GFP_KERNEL);
    if (!pc)
        return -ENOMEM;

    obj = i915_gem_alloc_object(ring->dev, 4096);
    if (obj == NULL) {
        DRM_ERROR("Failed to allocate seqno page\n");
        ret = -ENOMEM;
        goto err;
    }

//  i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

    ret = i915_gem_object_pin(obj, 4096, true);
    if (ret)
        goto err_unref;

    pc->gtt_offset = obj->gtt_offset;
    pc->cpu_page = (void *)MapIoMem(obj->pages[0], 4096, PG_SW);
    if (pc->cpu_page == NULL)
        goto err_unpin;

    pc->obj = obj;
    ring->private = pc;
    return 0;

err_unpin:
    i915_gem_object_unpin(obj);
err_unref:
    drm_gem_object_unreference(&obj->base);
err:
    kfree(pc);
    return ret;
}
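/*
 * Note on init_pipe_control() above: in this port the seqno page is mapped
 * with MapIoMem() rather than kmap() (the kunmap() call in
 * cleanup_pipe_control() below is commented out accordingly).  The start of
 * the page (pc->cpu_page[0]) holds the seqno read back by
 * pc_render_get_seqno(), while gtt_offset + 128 onwards is used as scratch
 * space by the PIPE_CONTROL flush helpers.
 */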

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
    struct pipe_control *pc = ring->private;
    struct drm_i915_gem_object *obj;

    if (!ring->private)
        return;

    obj = pc->obj;
//  kunmap(obj->pages[0]);
    i915_gem_object_unpin(obj);
    drm_gem_object_unreference(&obj->base);

    kfree(pc);
    ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret = init_ring_common(ring);

    if (INTEL_INFO(dev)->gen > 3) {
        int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
        if (IS_GEN6(dev) || IS_GEN7(dev))
            mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
        I915_WRITE(MI_MODE, mode);
        if (IS_GEN7(dev))
            I915_WRITE(GFX_MODE_GEN7,
                       GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                       GFX_MODE_ENABLE(GFX_REPLAY_MODE));
    }

    if (INTEL_INFO(dev)->gen >= 5) {
        ret = init_pipe_control(ring);
        if (ret)
            return ret;
    }

    if (INTEL_INFO(dev)->gen >= 6) {
        I915_WRITE(INSTPM,
                   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
    }

    return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
    if (!ring->private)
        return;

    cleanup_pipe_control(ring);
}

static void
update_mboxes(struct intel_ring_buffer *ring,
              u32 seqno,
              u32 mmio_offset)
{
    intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
                          MI_SEMAPHORE_GLOBAL_GTT |
                          MI_SEMAPHORE_REGISTER |
                          MI_SEMAPHORE_UPDATE);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
                 u32 *seqno)
{
    u32 mbox1_reg;
    u32 mbox2_reg;
    int ret;

    ret = intel_ring_begin(ring, 10);
    if (ret)
        return ret;

    mbox1_reg = ring->signal_mbox[0];
    mbox2_reg = ring->signal_mbox[1];

    *seqno = i915_gem_get_seqno(ring->dev);

    update_mboxes(ring, *seqno, mbox1_reg);
    update_mboxes(ring, *seqno, mbox2_reg);
    intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
    intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
    intel_ring_emit(ring, *seqno);
    intel_ring_emit(ring, MI_USER_INTERRUPT);
    intel_ring_advance(ring);

    return 0;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
                struct intel_ring_buffer *signaller,
                int ring,
                u32 seqno)
{
    int ret;
    u32 dw1 = MI_SEMAPHORE_MBOX |
              MI_SEMAPHORE_COMPARE |
              MI_SEMAPHORE_REGISTER;

    ret = intel_ring_begin(waiter, 4);
    if (ret)
        return ret;

    intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
    intel_ring_emit(waiter, seqno);
    intel_ring_emit(waiter, 0);
    intel_ring_emit(waiter, MI_NOOP);
    intel_ring_advance(waiter);

    return 0;
}

/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
                    struct intel_ring_buffer *signaller,
                    u32 seqno)
{
//  WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
    return intel_ring_sync(waiter,
                           signaller,
                           RCS,
                           seqno);
}

/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
                      struct intel_ring_buffer *signaller,
                      u32 seqno)
{
//  WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
    return intel_ring_sync(waiter,
                           signaller,
                           VCS,
                           seqno);
}

/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
                      struct intel_ring_buffer *signaller,
                      u32 seqno)
{
//  WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
    return intel_ring_sync(waiter,
                           signaller,
                           BCS,
                           seqno);
}
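/*
 * How the two halves above pair up: gen6_add_request() signals by writing
 * the new seqno into the other rings' mailbox registers (ring->signal_mbox[])
 * via update_mboxes(), while a *_sync_to() call on the waiting ring emits
 * MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE against
 * signaller->semaphore_register[], stalling the waiter until the signaller's
 * mailbox value catches up with the requested seqno.
 */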

#define PIPE_CONTROL_FLUSH(ring__, addr__)                                  \
do {                                                                        \
    intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |\
                    PIPE_CONTROL_DEPTH_STALL);                              \
    intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);            \
    intel_ring_emit(ring__, 0);                                             \
    intel_ring_emit(ring__, 0);                                             \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
                      u32 *result)
{
    struct drm_device *dev = ring->dev;
    u32 seqno = i915_gem_get_seqno(dev);
    struct pipe_control *pc = ring->private;
    u32 scratch_addr = pc->gtt_offset + 128;
    int ret;

    /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
     * incoherent with writes to memory, i.e. completely fubar,
     * so we need to use PIPE_NOTIFY instead.
     *
     * However, we also need to workaround the qword write
     * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
     * memory before requesting an interrupt.
     */
    ret = intel_ring_begin(ring, 32);
    if (ret)
        return ret;

    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                    PIPE_CONTROL_WRITE_FLUSH |
                    PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
    intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, 0);
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128; /* write to separate cachelines */
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128;
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128;
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128;
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    scratch_addr += 128;
    PIPE_CONTROL_FLUSH(ring, scratch_addr);
    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                    PIPE_CONTROL_WRITE_FLUSH |
                    PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                    PIPE_CONTROL_NOTIFY);
    intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, 0);
    intel_ring_advance(ring);

    *result = seqno;
    return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
                        u32 *result)
{
    struct drm_device *dev = ring->dev;
    u32 seqno = i915_gem_get_seqno(dev);
    int ret;

    ret = intel_ring_begin(ring, 4);
    if (ret)
        return ret;

    intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
    intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, MI_USER_INTERRUPT);
    intel_ring_advance(ring);

    *result = seqno;
    return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;

    /* Workaround to force correct ordering between irq and seqno writes on
     * ivb (and maybe also on snb) by reading from a CS register (like
     * ACTHD) before reading the status page. */
    if (IS_GEN7(dev))
        intel_ring_get_active_head(ring);
    return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
    return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
    struct pipe_control *pc = ring->private;
    return pc->cpu_page[0];
}
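/*
 * Seqno round trip: the add_request paths above store the new seqno either
 * into the hardware status page (MI_STORE_DWORD_INDEX at I915_GEM_HWS_INDEX)
 * or, on the Ironlake PIPE_CONTROL path, into the pipe_control page.  The
 * get_seqno callbacks here read the same location back, so comparing a
 * request's seqno against get_seqno() tells whether the GPU has passed it.
 */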

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    dev_priv->gt_irq_mask &= ~mask;
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
    POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    dev_priv->gt_irq_mask |= mask;
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
    POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    dev_priv->irq_mask &= ~mask;
    I915_WRITE(IMR, dev_priv->irq_mask);
    POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    dev_priv->irq_mask |= mask;
    I915_WRITE(IMR, dev_priv->irq_mask);
    POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (!dev->irq_enabled)
        return false;

    spin_lock(&ring->irq_lock);
    if (ring->irq_refcount++ == 0) {
        if (HAS_PCH_SPLIT(dev))
            ironlake_enable_irq(dev_priv,
                                GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
        else
            i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
    }
    spin_unlock(&ring->irq_lock);

    return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    spin_lock(&ring->irq_lock);
    if (--ring->irq_refcount == 0) {
        if (HAS_PCH_SPLIT(dev))
            ironlake_disable_irq(dev_priv,
                                 GT_USER_INTERRUPT |
                                 GT_PIPE_NOTIFY);
        else
            i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
    }
    spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    u32 mmio = 0;

    /* The ring status page addresses are no longer next to the rest of
     * the ring registers as of gen7.
     */
    if (IS_GEN7(dev)) {
        switch (ring->id) {
        case RING_RENDER:
            mmio = RENDER_HWS_PGA_GEN7;
            break;
        case RING_BLT:
            mmio = BLT_HWS_PGA_GEN7;
            break;
        case RING_BSD:
            mmio = BSD_HWS_PGA_GEN7;
            break;
        }
    } else if (IS_GEN6(ring->dev)) {
        mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
    } else {
        mmio = RING_HWS_PGA(ring->mmio_base);
    }

    I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
    POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
               u32 invalidate_domains,
               u32 flush_domains)
{
    int ret;

    ret = intel_ring_begin(ring, 2);
    if (ret)
        return ret;

    intel_ring_emit(ring, MI_FLUSH);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);
    return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
{
    u32 seqno;
    int ret;

    ret = intel_ring_begin(ring, 4);
    if (ret)
        return ret;

    seqno = i915_gem_get_seqno(ring->dev);

    intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
    intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
    intel_ring_emit(ring, seqno);
    intel_ring_emit(ring, MI_USER_INTERRUPT);
    intel_ring_advance(ring);

    *result = seqno;
    return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (!dev->irq_enabled)
        return false;

    /* It looks like we need to prevent the gt from suspending while waiting
     * for a notify irq, otherwise irqs seem to get lost on at least the
     * blt/bsd rings on ivb. */
    if (IS_GEN7(dev))
        gen6_gt_force_wake_get(dev_priv);

    spin_lock(&ring->irq_lock);
    if (ring->irq_refcount++ == 0) {
        ring->irq_mask &= ~rflag;
        I915_WRITE_IMR(ring, ring->irq_mask);
        ironlake_enable_irq(dev_priv, gflag);
    }
    spin_unlock(&ring->irq_lock);

    return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    spin_lock(&ring->irq_lock);
    if (--ring->irq_refcount == 0) {
        ring->irq_mask |= rflag;
        I915_WRITE_IMR(ring, ring->irq_mask);
        ironlake_disable_irq(dev_priv, gflag);
    }
    spin_unlock(&ring->irq_lock);

    if (IS_GEN7(dev))
        gen6_gt_force_wake_put(dev_priv);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (!dev->irq_enabled)
        return false;

    spin_lock(&ring->irq_lock);
    if (ring->irq_refcount++ == 0) {
        if (IS_G4X(dev))
            i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
        else
            ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
    }
    spin_unlock(&ring->irq_lock);

    return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    spin_lock(&ring->irq_lock);
    if (--ring->irq_refcount == 0) {
        if (IS_G4X(dev))
            i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
        else
            ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
    }
    spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
    int ret;

    ret = intel_ring_begin(ring, 2);
    if (ret)
        return ret;

    intel_ring_emit(ring,
                    MI_BATCH_BUFFER_START | (2 << 6) |
                    MI_BATCH_NON_SECURE_I965);
    intel_ring_emit(ring, offset);
    intel_ring_advance(ring);

    return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len)
{
    struct drm_device *dev = ring->dev;
    int ret;

    if (IS_I830(dev) || IS_845G(dev)) {
        ret = intel_ring_begin(ring, 4);
        if (ret)
            return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER);
        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
        intel_ring_emit(ring, offset + len - 8);
        intel_ring_emit(ring, 0);
    } else {
        ret = intel_ring_begin(ring, 2);
        if (ret)
            return ret;

        if (INTEL_INFO(dev)->gen >= 4) {
            intel_ring_emit(ring,
                            MI_BATCH_BUFFER_START | (2 << 6) |
                            MI_BATCH_NON_SECURE_I965);
            intel_ring_emit(ring, offset);
        } else {
            intel_ring_emit(ring,
                            MI_BATCH_BUFFER_START | (2 << 6));
            intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
        }
    }
    intel_ring_advance(ring);

    return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    struct drm_i915_gem_object *obj;

    obj = ring->status_page.obj;
    if (obj == NULL)
        return;

    kunmap(obj->pages[0]);
    i915_gem_object_unpin(obj);
    drm_gem_object_unreference(&obj->base);
    ring->status_page.obj = NULL;

    memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj;
    int ret;

    obj = i915_gem_alloc_object(dev, 4096);
    if (obj == NULL) {
        DRM_ERROR("Failed to allocate status page\n");
        ret = -ENOMEM;
        goto err;
    }

//  i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

    ret = i915_gem_object_pin(obj, 4096, true);
    if (ret != 0) {
        goto err_unref;
    }

    ring->status_page.gfx_addr = obj->gtt_offset;
    ring->status_page.page_addr = MapIoMem(obj->pages[0], 4096, PG_SW);
    if (ring->status_page.page_addr == NULL) {
        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
        goto err_unpin;
    }
    ring->status_page.obj = obj;
    memset(ring->status_page.page_addr, 0, PAGE_SIZE);

    intel_ring_setup_status_page(ring);
    DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                     ring->name, ring->status_page.gfx_addr);

    return 0;

err_unpin:
    i915_gem_object_unpin(obj);
err_unref:
    drm_gem_object_unreference(&obj->base);
err:
    return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
    struct drm_i915_gem_object *obj;
    int ret;

    ring->dev = dev;
    INIT_LIST_HEAD(&ring->active_list);
    INIT_LIST_HEAD(&ring->request_list);
    INIT_LIST_HEAD(&ring->gpu_write_list);

//  init_waitqueue_head(&ring->irq_queue);
    spin_lock_init(&ring->irq_lock);
    ring->irq_mask = ~0;

    if (I915_NEED_GFX_HWS(dev)) {
        ret = init_status_page(ring);
        if (ret)
            return ret;
    }

    obj = i915_gem_alloc_object(dev, ring->size);
    if (obj == NULL) {
        DRM_ERROR("Failed to allocate ringbuffer\n");
        ret = -ENOMEM;
        goto err_hws;
    }

    ring->obj = obj;

    ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
    if (ret)
        goto err_unref;

    ring->map.size = ring->size;
    ring->map.offset = get_bus_addr() + obj->gtt_offset;
    ring->map.type = 0;
    ring->map.flags = 0;
    ring->map.mtrr = 0;

//  drm_core_ioremap_wc(&ring->map, dev);

    ring->map.handle = ioremap(ring->map.offset, ring->map.size);

    if (ring->map.handle == NULL) {
        DRM_ERROR("Failed to map ringbuffer.\n");
        ret = -EINVAL;
        goto err_unpin;
    }

    ring->virtual_start = ring->map.handle;
    ret = ring->init(ring);
    if (ret)
        goto err_unmap;

    /* Workaround an erratum on the i830 which causes a hang if
     * the TAIL pointer points to within the last 2 cachelines
     * of the buffer.
     */
    ring->effective_size = ring->size;
    if (IS_I830(ring->dev))
        ring->effective_size -= 128;

    return 0;

err_unmap:
    FreeKernelSpace(ring->virtual_start);
err_unpin:
    i915_gem_object_unpin(obj);
err_unref:
    drm_gem_object_unreference(&obj->base);
    ring->obj = NULL;
err_hws:
//  cleanup_status_page(ring);
    return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
    struct drm_i915_private *dev_priv;
    int ret;

    if (ring->obj == NULL)
        return;

    /* Disable the ring buffer. The ring must be idle at this point */
    dev_priv = ring->dev->dev_private;
    ret = intel_wait_ring_idle(ring);
    if (ret)
        DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                  ring->name, ret);

    I915_WRITE_CTL(ring, 0);

//  drm_core_ioremapfree(&ring->map, ring->dev);

    i915_gem_object_unpin(ring->obj);
    drm_gem_object_unreference(&ring->obj->base);
    ring->obj = NULL;

    if (ring->cleanup)
        ring->cleanup(ring);

//  cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
    unsigned int *virt;
    int rem = ring->size - ring->tail;

    ENTER();

    if (ring->space < rem) {
        int ret = intel_wait_ring_buffer(ring, rem);
        if (ret)
            return ret;
    }

    virt = (unsigned int *)(ring->virtual_start + ring->tail);
    rem /= 8;
    while (rem--) {
        *virt++ = MI_NOOP;
        *virt++ = MI_NOOP;
    }

    ring->tail = 0;
    ring->space = ring_space(ring);

    LEAVE();
    return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
    struct drm_device *dev = ring->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    unsigned long end;
    u32 head;

    ENTER();

    /* If the reported head position has wrapped or hasn't advanced,
     * fallback to the slow and accurate path.
     */
    head = intel_read_status_page(ring, 4);
    if (head > ring->head) {
        ring->head = head;
        ring->space = ring_space(ring);
        if (ring->space >= n) {
            LEAVE();
            return 0;
        }
    }

    end = jiffies + 3 * HZ;
    do {
        ring->head = I915_READ_HEAD(ring);
        ring->space = ring_space(ring);
        if (ring->space >= n) {
//          trace_i915_ring_wait_end(ring);
            LEAVE();
            return 0;
        }

        msleep(1);
        if (atomic_read(&dev_priv->mm.wedged)) {
            LEAVE();
            return -EAGAIN;
        }
    } while (!time_after(jiffies, end));
    LEAVE();

    return -EBUSY;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
                     int num_dwords)
{
    struct drm_i915_private *dev_priv = ring->dev->dev_private;
    int n = 4*num_dwords;
    int ret;

//  if (unlikely(atomic_read(&dev_priv->mm.wedged)))
//      return -EIO;

    if (unlikely(ring->tail + n > ring->effective_size)) {
        ret = intel_wrap_ring_buffer(ring);
        if (unlikely(ret))
            return ret;
    }

    if (unlikely(ring->space < n)) {
        ret = intel_wait_ring_buffer(ring, n);
        if (unlikely(ret))
            return ret;
    }

    ring->space -= n;
    return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
    ring->tail &= ring->size - 1;
    ring->write_tail(ring, ring->tail);
}
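/*
 * The emission pattern used throughout this file (a rough sketch, not a new
 * API): reserve space with intel_ring_begin(), write dwords with
 * intel_ring_emit(), then publish the new tail with intel_ring_advance():
 *
 *     ret = intel_ring_begin(ring, 2);
 *     if (ret)
 *         return ret;
 *     intel_ring_emit(ring, MI_FLUSH);
 *     intel_ring_emit(ring, MI_NOOP);
 *     intel_ring_advance(ring);
 *
 * intel_ring_begin() counts in dwords (it multiplies by 4 internally), and
 * the number of intel_ring_emit() calls must match what was reserved.
 */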

static const struct intel_ring_buffer render_ring = {
    .name = "render ring",
    .id = RING_RENDER,
    .mmio_base = RENDER_RING_BASE,
    .size = 32 * PAGE_SIZE,
    .init = init_render_ring,
    .write_tail = ring_write_tail,
    .flush = render_ring_flush,
    .add_request = render_ring_add_request,
    .get_seqno = ring_get_seqno,
    .irq_get = render_ring_get_irq,
    .irq_put = render_ring_put_irq,
    .dispatch_execbuffer = render_ring_dispatch_execbuffer,
//  .cleanup = render_ring_cleanup,
    .sync_to = render_ring_sync_to,
    .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
                           MI_SEMAPHORE_SYNC_RV,
                           MI_SEMAPHORE_SYNC_RB},
    .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
    .name = "bsd ring",
    .id = RING_BSD,
    .mmio_base = BSD_RING_BASE,
    .size = 32 * PAGE_SIZE,
    .init = init_ring_common,
    .write_tail = ring_write_tail,
    .flush = bsd_ring_flush,
    .add_request = ring_add_request,
    .get_seqno = ring_get_seqno,
    .irq_get = bsd_ring_get_irq,
    .irq_put = bsd_ring_put_irq,
    .dispatch_execbuffer = ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;

    /* Every tail move must follow the sequence below */
    I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
               GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
               GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
    I915_WRITE(GEN6_BSD_RNCID, 0x0);

    if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                  GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                 50))
        DRM_ERROR("timed out waiting for IDLE Indicator\n");

    I915_WRITE_TAIL(ring, value);
    I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
               GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
               GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
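/*
 * The tail-write sequence above, step by step: mask off the RC ILDL message
 * in GEN6_BSD_SLEEP_PSMI_CONTROL, clear GEN6_BSD_RNCID, wait (up to 50 ms)
 * for the idle indicator to drop, write the new tail, then re-enable the
 * message.  The intent appears to be keeping the engine awake so the tail
 * update is not missed while it sleeps.
 */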

static int gen6_ring_flush(struct intel_ring_buffer *ring,
                           u32 invalidate, u32 flush)
{
    uint32_t cmd;
    int ret;

    ret = intel_ring_begin(ring, 4);
    if (ret)
        return ret;

    cmd = MI_FLUSH_DW;
    if (invalidate & I915_GEM_GPU_DOMAINS)
        cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
    intel_ring_emit(ring, cmd);
    intel_ring_emit(ring, 0);
    intel_ring_emit(ring, 0);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);
    return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                              u32 offset, u32 len)
{
    int ret;

    ret = intel_ring_begin(ring, 2);
    if (ret)
        return ret;

    intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
    /* bit0-7 is the length on GEN6+ */
    intel_ring_emit(ring, offset);
    intel_ring_advance(ring);

    return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
    return gen6_ring_get_irq(ring,
                             GT_USER_INTERRUPT,
                             GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
    return gen6_ring_put_irq(ring,
                             GT_USER_INTERRUPT,
                             GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
    return gen6_ring_get_irq(ring,
                             GT_GEN6_BSD_USER_INTERRUPT,
                             GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
    return gen6_ring_put_irq(ring,
                             GT_GEN6_BSD_USER_INTERRUPT,
                             GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
    .name = "gen6 bsd ring",
    .id = RING_BSD,
    .mmio_base = GEN6_BSD_RING_BASE,
    .size = 32 * PAGE_SIZE,
    .init = init_ring_common,
    .write_tail = gen6_bsd_ring_write_tail,
    .flush = gen6_ring_flush,
    .add_request = gen6_add_request,
    .get_seqno = gen6_ring_get_seqno,
    .irq_get = gen6_bsd_ring_get_irq,
    .irq_put = gen6_bsd_ring_put_irq,
    .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
    .sync_to = gen6_bsd_ring_sync_to,
    .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
                           MI_SEMAPHORE_SYNC_INVALID,
                           MI_SEMAPHORE_SYNC_VB},
    .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
    return gen6_ring_get_irq(ring,
                             GT_BLT_USER_INTERRUPT,
                             GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
    gen6_ring_put_irq(ring,
                      GT_BLT_USER_INTERRUPT,
                      GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some stepping of SNB,
 * each time when BLT engine ring tail moved,
 * the first command in the ring to be parsed
 * should be MI_BATCH_BUFFER_START
 */
#define NEED_BLT_WORKAROUND(dev) \
    (IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
    return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
    if (NEED_BLT_WORKAROUND(ring->dev)) {
        struct drm_i915_gem_object *obj;
        u32 *ptr;
        int ret;

        obj = i915_gem_alloc_object(ring->dev, 4096);
        if (obj == NULL)
            return -ENOMEM;

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret) {
            drm_gem_object_unreference(&obj->base);
            return ret;
        }

        ptr = MapIoMem(obj->pages[0], 4096, PG_SW);
        obj->mapped = ptr;

        *ptr++ = MI_BATCH_BUFFER_END;
        *ptr++ = MI_NOOP;

        ret = i915_gem_object_set_to_gtt_domain(obj, false);
        if (ret) {
            i915_gem_object_unpin(obj);
            drm_gem_object_unreference(&obj->base);
            FreeKernelSpace(ptr);
            obj->mapped = NULL;
            return ret;
        }
        FreeKernelSpace(ptr);
        obj->mapped = NULL;

        ring->private = obj;
    }

    return init_ring_common(ring);
}

static int blt_ring_begin(struct intel_ring_buffer *ring,
                          int num_dwords)
{
    if (ring->private) {
        int ret = intel_ring_begin(ring, num_dwords+2);
        if (ret)
            return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER_START);
        intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

        return 0;
    } else
        return intel_ring_begin(ring, 4);
}
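/*
 * How the SNB blitter workaround above fits together: blt_ring_init()
 * prepares a private 4 KiB batch whose only contents are
 * MI_BATCH_BUFFER_END + MI_NOOP.  blt_ring_begin() then reserves two extra
 * dwords and emits MI_BATCH_BUFFER_START pointing at that batch ahead of the
 * caller's own commands, so the first command parsed after every tail move
 * is a batch buffer start, as the erratum requires.
 */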

static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
{
    uint32_t cmd;
    int ret;

    ret = blt_ring_begin(ring, 4);
    if (ret)
        return ret;

    cmd = MI_FLUSH_DW;
    if (invalidate & I915_GEM_DOMAIN_RENDER)
        cmd |= MI_INVALIDATE_TLB;
    intel_ring_emit(ring, cmd);
    intel_ring_emit(ring, 0);
    intel_ring_emit(ring, 0);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);
    return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
    if (!ring->private)
        return;

    i915_gem_object_unpin(ring->private);
    drm_gem_object_unreference(ring->private);
    ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
    .name = "blt ring",
    .id = RING_BLT,
    .mmio_base = BLT_RING_BASE,
    .size = 32 * PAGE_SIZE,
    .init = blt_ring_init,
    .write_tail = ring_write_tail,
    .flush = blt_ring_flush,
    .add_request = gen6_add_request,
    .get_seqno = gen6_ring_get_seqno,
    .irq_get = blt_ring_get_irq,
    .irq_put = blt_ring_put_irq,
    .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
//  .cleanup = blt_ring_cleanup,
    .sync_to = gen6_blt_ring_sync_to,
    .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
                           MI_SEMAPHORE_SYNC_BV,
                           MI_SEMAPHORE_SYNC_INVALID},
    .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

    *ring = render_ring;
    if (INTEL_INFO(dev)->gen >= 6) {
        ring->add_request = gen6_add_request;
        ring->flush = gen6_render_ring_flush;
        ring->irq_get = gen6_render_ring_get_irq;
        ring->irq_put = gen6_render_ring_put_irq;
        ring->get_seqno = gen6_ring_get_seqno;
    } else if (IS_GEN5(dev)) {
        ring->add_request = pc_render_add_request;
        ring->get_seqno = pc_render_get_seqno;
    }

    if (!I915_NEED_GFX_HWS(dev)) {
        ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
    }

    return intel_init_ring_buffer(dev, ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

    if (IS_GEN6(dev) || IS_GEN7(dev))
        *ring = gen6_bsd_ring;
    else
        *ring = bsd_ring;

    return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

    *ring = gen6_blt_ring;

    return intel_init_ring_buffer(dev, ring);
}