/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
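
/*
 * The KolibriOS port defines the Linux memory-barrier macros locally as
 * x86 fence instructions: mb() is a full barrier (mfence), rmb() orders
 * reads (lfence) and wmb() orders writes (sfence).
 */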
#define mb()    asm volatile("mfence" : : : "memory")
#define rmb()   asm volatile("lfence" : : : "memory")
#define wmb()   asm volatile("sfence" : : : "memory")

#include "vmwgfx_drv.h"
/* The next two header names were stripped by the repo viewer; the upstream vmwgfx driver uses: */
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

#define TASK_INTERRUPTIBLE      1
#define TASK_UNINTERRUPTIBLE    2

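/**
 * vmw_fifo_have_3d - check whether the virtual hardware offers a usable
 * 3D FIFO.
 *
 * Requires the extended FIFO capability, a non-zero 3D hardware version
 * of at least SVGA3D_HWVERSION_WS8_B1, and the Screen Object path, since
 * the non-Screen Object path does not support surfaces.
 */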
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = ioread32(fifo_mem +
			     ((fifo->capabilities &
			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
			      SVGA_FIFO_3D_HWVERSION_REVISED :
			      SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Non-Screen Object path does not support surfaces */
	if (!dev_priv->sou_priv)
		return false;

	return true;
}

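/**
 * vmw_fifo_have_pitchlock - check whether the extended FIFO advertises
 * the pitchlock capability.
 */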
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

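/**
 * vmw_fifo_init - initialize the command FIFO.
 *
 * Allocates the static bounce buffer, saves the current SVGA
 * ENABLE/CONFIG_DONE/TRACES register state, enables the device, sets up
 * the FIFO MIN/MAX, NEXT_CMD, STOP and BUSY registers and signals
 * CONFIG_DONE before reading back the FIFO capabilities.
 */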
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;

	ENTER();

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = KernelAlloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
//   init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	int ret = 0; //vmw_fifo_send_fence(dev_priv, &dummy);
	LEAVE();
	return ret;
}

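/**
 * vmw_fifo_ping_host - ask the host to process FIFO commands.
 *
 * If the device is not already busy processing the FIFO, mark it busy
 * and write @reason to SVGA_REG_SYNC to kick command processing.
 */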
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}

	mutex_unlock(&dev_priv->hw_mutex);
}

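/**
 * vmw_fifo_release - tear down the FIFO.
 *
 * Syncs until the device is idle, records the last fence read from the
 * FIFO, restores the saved ENABLE/CONFIG_DONE/TRACES register state and
 * frees the bounce buffers.
 */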
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	mutex_unlock(&dev_priv->hw_mutex);
	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

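/*
 * vmw_fifo_is_full - check whether @bytes of command space are available.
 *
 * The FIFO command area is the ring [min, max) with write pointer
 * NEXT_CMD and read pointer STOP.  Free space is counted as the bytes
 * from next_cmd up to max plus the bytes from min up to stop, and the
 * FIFO is reported full when that is not strictly greater than @bytes.
 */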
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = GetTimerTicks() + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
//       prepare_to_wait(&dev_priv->fifo_queue, &__wait,
//               (interruptible) ?
//               TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(GetTimerTicks(), end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		delay(1);
	}
//   finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}

/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 *  If it times out waiting for fifo space, or if @bytes is larger than the
 *  available fifo space.
 *
 * Returns:
 *   Pointer to the fifo, or NULL on error (possible hardware hang).
 */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = kmalloc(bytes, 0);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}
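
/*
 * Typical usage of the reserve/commit pair, as in
 * vmw_fifo_emit_dummy_query() below:
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	... fill in *cmd ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 *
 * While a reservation is outstanding the fifo_mutex is held; it is
 * released again by vmw_fifo_commit().
 */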
 
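/*
 * vmw_fifo_res_copy - copy a bounce-buffered command into the FIFO when
 * the device supports reservation: publish the size in SVGA_FIFO_RESERVED,
 * then copy up to the end of the ring and wrap the remainder to min.
 */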
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
		       rest);
}

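/*
 * vmw_fifo_slow_copy - copy a bounce-buffered command into the FIFO one
 * 32-bit word at a time, advancing SVGA_FIFO_NEXT_CMD after every word,
 * for devices without the reservation capability.
 */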
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

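/**
 * vmw_fifo_commit - make a previously reserved command visible to the
 * device.
 *
 * Copies out of the bounce buffer if one was used, advances
 * SVGA_FIFO_NEXT_CMD (wrapping at max) when a bounce buffer or FIFO
 * reservation was used, clears SVGA_FIFO_RESERVED, pings the host and
 * releases the fifo_mutex taken by vmw_fifo_reserve().
 */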
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

//   down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
//   up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

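/**
 * vmw_fifo_send_fence - emit a fence (marker) command.
 *
 * Assigns the next non-zero sequence number to @seqno and, when the FIFO
 * supports fences, writes an SVGA_CMD_FENCE command; otherwise the fence
 * is emulated by the waiting code in vmwgfx_irq.c.
 */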
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*seqno, &cmd_fence->fence);
	vmw_fifo_commit(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}