/**************************************************************************
 *
 * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif
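
/*
 * Editorial note (not part of the original driver): with 4 KiB pages,
 * one page-table page holds PAGE_SIZE / VMW_PPN_SIZE entries, i.e.
 * 512 entries with 8-byte PPNs (CONFIG_64BIT) or 1024 entries with
 * 4-byte PPNs. A one-level table therefore maps up to 2 MiB or 4 MiB
 * of MOB data respectively; a two-level table up to 1 GiB or 4 GiB.
 */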

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo:           Buffer object holding the page table.
 * @num_pages:       Number of pages that make up the page table.
 * @pt_level:        The indirection level of the page table. 0-2.
 * @pt_root_page:    DMA address of the level 0 page of the page table.
 * @id:              Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
 * @enabled:        Whether the table is set up on the device. Each
 *                  initializer below supplies the fields in this order.
 */
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};
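
/*
 * Editorial note (not part of the original driver): the only difference
 * between the two table sets is the trailing DX context table;
 * vmw_otables_setup() below picks dx_tables when the device supports DX
 * (dev_priv->has_dx) and pre_dx_tables otherwise.
 */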

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable_bo:      Buffer object holding the object tables
 * @offset:         Offset of the table within @otable_bo
 * @otable:         Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support a depth-2 page table for object
	 * tables, but the otable size is determined at compile-time,
	 * so this BUG shouldn't trigger randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}
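
/*
 * Editorial note (not part of the original driver): every command
 * submission in this file follows the reserve/fill/commit pattern
 * used above:
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL)) {
 *		DRM_ERROR(...);
 *		... error path: fail with -ENOMEM, unwind, or just log ...
 *	}
 *	... fill cmd->header and cmd->body ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 *
 * vmw_fifo_reserve() may block waiting for FIFO space and returns NULL
 * on failure; vmw_fifo_commit() makes the filled command visible to
 * the device.
 */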
182
 
183
/*
184
 * vmw_takedown_otable_base - Issue an object table base takedown command
185
 * to the device
186
 *
187
 * @dev_priv:       Pointer to a device private structure
188
 * @type:           Type of object table base
189
 *
190
 */
191
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
192
				     SVGAOTableType type,
193
				     struct vmw_otable *otable)
194
{
195
	struct {
196
		SVGA3dCmdHeader header;
197
		SVGA3dCmdSetOTableBase body;
198
	} *cmd;
199
	struct ttm_buffer_object *bo;
200
 
201
	if (otable->page_table == NULL)
202
		return;
203
 
204
	bo = otable->page_table->pt_bo;
205
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
5078 serge 206
	if (unlikely(cmd == NULL)) {
207
		DRM_ERROR("Failed reserving FIFO space for OTable "
208
			  "takedown.\n");
6296 serge 209
		return;
210
	}
211
 
4569 Serge 212
	memset(cmd, 0, sizeof(*cmd));
213
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
214
	cmd->header.size = sizeof(cmd->body);
215
	cmd->body.type = type;
216
	cmd->body.baseAddress = 0;
217
	cmd->body.sizeInBytes = 0;
218
	cmd->body.validSizeInBytes = 0;
219
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
220
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
221
 
222
	if (bo) {
223
		int ret;
224
 
225
		ret = ttm_bo_reserve(bo, false, true, false, NULL);
226
		BUG_ON(ret != 0);
227
 
228
		vmw_fence_single_bo(bo, NULL);
229
		ttm_bo_unreserve(bo);
230
	}
231
 
232
	vmw_mob_destroy(otable->page_table);
233
	otable->page_table = NULL;
234
}
235
 
6296 serge 236
 
237
static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
238
				  struct vmw_otable_batch *batch)
4569 Serge 239
{
240
	unsigned long offset;
241
	unsigned long bo_size;
6296 serge 242
	struct vmw_otable *otables = batch->otables;
4569 Serge 243
	SVGAOTableType i;
244
	int ret;
245
 
6296 serge 246
	bo_size = 0;
247
	for (i = 0; i < batch->num_otables; ++i) {
248
		if (!otables[i].enabled)
249
			continue;
4569 Serge 250
 
251
		otables[i].size =
252
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
253
		bo_size += otables[i].size;
254
	}
255
 
256
	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
257
			    ttm_bo_type_device,
258
			    &vmw_sys_ne_placement,
259
			    0, false, NULL,
6296 serge 260
			    &batch->otable_bo);
4569 Serge 261
 
262
	if (unlikely(ret != 0))
263
		goto out_no_bo;
264
 
6296 serge 265
	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
4569 Serge 266
	BUG_ON(ret != 0);
6296 serge 267
	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
4569 Serge 268
	if (unlikely(ret != 0))
269
		goto out_unreserve;
6296 serge 270
	ret = vmw_bo_map_dma(batch->otable_bo);
4569 Serge 271
	if (unlikely(ret != 0))
272
		goto out_unreserve;
273
 
6296 serge 274
	ttm_bo_unreserve(batch->otable_bo);
4569 Serge 275
 
276
	offset = 0;
6296 serge 277
	for (i = 0; i < batch->num_otables; ++i) {
278
		if (!batch->otables[i].enabled)
279
			continue;
280
 
281
		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
282
					    offset,
4569 Serge 283
					    &otables[i]);
284
		if (unlikely(ret != 0))
285
			goto out_no_setup;
286
		offset += otables[i].size;
287
	}
288
 
289
	return 0;
290
 
291
out_unreserve:
6296 serge 292
	ttm_bo_unreserve(batch->otable_bo);
4569 Serge 293
out_no_setup:
6296 serge 294
	for (i = 0; i < batch->num_otables; ++i) {
295
		if (batch->otables[i].enabled)
296
			vmw_takedown_otable_base(dev_priv, i,
297
						 &batch->otables[i]);
298
	}
4569 Serge 299
 
6296 serge 300
	ttm_bo_unref(&batch->otable_bo);
4569 Serge 301
out_no_bo:
302
	return ret;
303
}
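
/*
 * Editorial layout note (not part of the original driver): all enabled
 * otables share a single buffer object. Each table size is rounded up
 * to a page boundary and the tables are placed back to back at the
 * running offset, so the BO looks like
 *
 *	| MOB table | SURFACE table | CONTEXT table | ... |
 *
 * with vmw_setup_otable_base() told each table's offset into the BO.
 */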

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes care of the device guest backed surface initialization by setting
 * up the guest backed memory object tables. Returns 0 on success and
 * various error codes on failure. A successful return means the object
 * tables can be taken down using the vmw_otables_takedown function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
	int ret;

	if (dev_priv->has_dx) {
		*otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
		if (*otables == NULL)
			return -ENOMEM;

		memcpy(*otables, dx_tables, sizeof(dx_tables));
		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
	} else {
		*otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
		if (*otables == NULL)
			return -ENOMEM;

		memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
	}

	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
	if (unlikely(ret != 0))
		goto out_setup;

	return 0;

out_setup:
	kfree(*otables);
	return ret;
}

static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
				      struct vmw_otable_batch *batch)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = batch->otable_bo;
	int ret;

	for (i = 0; i < batch->num_otables; ++i)
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_unref(&batch->otable_bo);
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages:  Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}
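
/*
 * Editorial note (not part of the original driver): a worked example of
 * the loop above, assuming 4 KiB pages and 8-byte PPNs (CONFIG_64BIT),
 * for a 1 GiB MOB (262144 data pages):
 *
 *	pass 1: 262144 entries * 8 bytes = 2 MiB -> 512 level-1 pages
 *	pass 2:    512 entries * 8 bytes = 4 KiB ->   1 level-2 page
 *
 * The loop stops once a level fits in a single page, so the function
 * returns 513 pages and the resulting table ends up with pt_level == 2.
 */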

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages:  Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(mob == NULL))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @mob:         Pointer to the mob the pagetable of which we want to
 *               populate.
 *
 * This function allocates memory to be used for the pagetable and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
 * memory resources aren't sufficient; the TTM memory accounting it
 * performs may cause other TTM buffer objects to be swapped out.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;

	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);

	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_unref(&mob->pt_bo);

	return ret;
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif
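
/*
 * Editorial note (not part of the original driver): in the 64-bit
 * variant each entry is written as a u64 PPN, so the u32 cursor is
 * advanced by two slots per entry; the 32-bit variant advances it by
 * one. vmw_mob_build_pt() below can therefore fill a page-table page
 * with the same loop regardless of VMW_PPN_SIZE.
 */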

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter:      Page iterator over the underlying buffer object's
 *                  data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter:        Page iterator over the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob:            Pointer to a mob whose page table needs setting up.
 * @data_iter:      Page iterator over the buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Builds a multilevel mob page table bottom-up, one level per loop
 * iteration.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}
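
/*
 * Editorial note (not part of the original driver): a small worked
 * example of the loop above with 8-byte PPNs. For 1025 data pages:
 *
 *	level 1: 1025 entries * 8 bytes = 8200 bytes -> 3 PT pages
 *	level 2:    3 entries * 8 bytes =   24 bytes -> 1 PT page
 *
 * After the second pass num_data_pages == 1, the loop exits with
 * pt_level == 2, and pt_root_page points at the single level-2 page.
 */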

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob:            Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo)
		ttm_bo_unref(&mob->pt_bo);
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, false, NULL);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object unbinding.\n");
	} else {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}
	if (bo) {
		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 *                populating it if necessary.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob we're making visible.
 * @vsgt:           Pointer to a struct vmw_sg_table describing the data
 *                  pages of the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 *                  object.
 * @mob_id:         Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	vmw_fifo_resource_inc(dev_priv);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object binding.\n");
		goto out_no_cmd_space;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_fifo_resource_dec(dev_priv);
	if (pt_set_up)
		ttm_bo_unref(&mob->pt_bo);

	return -ENOMEM;
}
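
/*
 * Editorial sketch (not part of the original driver): how a caller such
 * as the ttm_tt backend mentioned above is expected to drive the MOB
 * API in this file. The local variable names are hypothetical; the
 * calls are the ones defined here.
 *
 *	struct vmw_mob *mob;
 *	int ret;
 *
 *	mob = vmw_mob_create(num_data_pages);
 *	if (mob == NULL)
 *		return -ENOMEM;
 *
 *	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_data_pages, mob_id);
 *	if (ret != 0) {
 *		vmw_mob_destroy(mob);
 *		return ret;
 *	}
 *
 *	... the device may now access the MOB by mob_id ...
 *
 *	vmw_mob_unbind(dev_priv, mob);
 *	vmw_mob_destroy(mob);
 */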