/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

struct vmw_temp_set_context {
	SVGA3dCmdHeader header;
	SVGA3dCmdDXTempSetContext body;
};

bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_mmio_read(fifo_mem +
				  ((fifo->capabilities &
				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				   SVGA_FIFO_3D_HWVERSION_REVISED :
				   SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;

	fifo->dx = false;
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);
	vmw_write(dev_priv, SVGA_REG_TRACES, 0);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	return 0;
}

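/*
 * Editor's sketch (not part of the original file): after vmw_fifo_init()
 * the start of the MMIO area doubles as a register file describing the
 * command ring that the rest of this file manipulates:
 *
 *   fifo_mem[SVGA_FIFO_MIN]      - byte offset of the first ring byte
 *   fifo_mem[SVGA_FIFO_MAX]      - byte offset one past the last ring byte
 *   fifo_mem[SVGA_FIFO_NEXT_CMD] - guest (driver) write pointer
 *   fifo_mem[SVGA_FIFO_STOP]     - host (device) read pointer
 *
 * The ring is empty when NEXT_CMD == STOP and wraps from MAX back to MIN,
 * which is why the offsets below are always checked against both bounds.
 */
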
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	preempt_disable();
	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	preempt_enable();
}

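/*
 * Editor's note (not in the original source): the cmpxchg above acts as a
 * cheap doorbell. SVGA_FIFO_BUSY is raised at most once per host idle
 * period, and the host lowers it again once it has drained the FIFO, so
 * only the first ping after the host goes idle pays for the SVGA_REG_SYNC
 * register write that traps out to the hypervisor.
 */
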
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

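/*
 * Editor's worked example (not in the original source): with the write
 * pointer ahead of the read pointer, say min = 0x1000, max = 0x200000,
 * next_cmd = 0x180000 and stop = 0x2000, the free space is the tail of
 * the ring plus the part already consumed at its head:
 *
 *   (max - next_cmd) + (stop - min) = 0x80000 + 0x1000 = 0x81000 bytes,
 *
 * so a reservation of 0x81000 bytes or more reports the FIFO as full.
 */
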
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
//	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
//		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
//				(interruptible) ?
//				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		delay(1);
	}
//	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}

/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * If it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_mmio_write(bytes, fifo_mem +
						       SVGA_FIFO_RESERVED);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret)) {
		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
			  (unsigned) bytes);
//		dump_stack();
		return NULL;
	}

	return ret;
}

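#if 0
/*
 * Editor's usage sketch (not part of the original file): every command
 * reaches the device through a reserve/commit pair. The command struct
 * below follows the pattern used elsewhere in vmwgfx (e.g. the kms dirty
 * handlers); the function name is made up for illustration.
 */
static int vmw_example_send_update(struct vmw_private *dev_priv,
				   uint32_t x, uint32_t y,
				   uint32_t w, uint32_t h)
{
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	/* Grab space in the ring (or a bounce buffer)... */
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header = SVGA_CMD_UPDATE;
	cmd->body.x = x;
	cmd->body.y = y;
	cmd->body.width = w;
	cmd->body.height = h;

	/* ...then publish it and ping the host. */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	return 0;
}
#endif
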
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      u32 *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       u32 *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	if (fifo_state->dx)
		bytes += sizeof(struct vmw_temp_set_context);

	fifo_state->dx = false;
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_fifo_commit_flush(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}

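#if 0
/*
 * Editor's usage sketch (not part of the original file): a caller that
 * wants to throttle on previously submitted commands can emit a fence
 * and wait on it. vmw_wait_seqno() lives in vmwgfx_irq.c; the function
 * name below is made up and error handling is trimmed for brevity.
 */
static int vmw_example_sync(struct vmw_private *dev_priv)
{
	uint32_t seqno;
	int ret;

	ret = vmw_fifo_send_fence(dev_priv, &seqno);
	if (unlikely(ret != 0))
		return ret;

	return vmw_wait_seqno(dev_priv, false, seqno, true, 3 * HZ);
}
#endif
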
/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A Query results structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object. And that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}

void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
}